Separate out tests and benchmarks

This commit is contained in:
nmannall
2024-01-22 16:46:04 +00:00
Parent 6fd8a27d4f
Commit bed5ae79c3
6 changed files with 195 additions and 465 deletions


@@ -1,12 +1,13 @@
"""ReFrame base classes for GprMax tests""" """ReFrame base classes for GprMax tests"""
import os import os
import pathlib
import reframe as rfm import reframe as rfm
import reframe.utility.sanity as sn import reframe.utility.sanity as sn
from reframe.core.builtins import performance_function, require_deps, run_after, sanity_function
from reframe.utility import udeps from reframe.utility import udeps
from configuration.user_config import GPRMAX_ROOT_DIR GPRMAX_ROOT_DIR = pathlib.Path(__file__).parent.parent.resolve()
PATH_TO_PYENV = os.path.join(".venv", "bin", "activate") PATH_TO_PYENV = os.path.join(".venv", "bin", "activate")
@@ -19,7 +20,7 @@ class CreatePyenvTest(rfm.RunOnlyRegressionTest):
prerun_cmds = [
"python -m venv --system-site-packages --prompt gprMax .venv",
f"source {PATH_TO_PYENV}",
f"pip install -r {os.path.join(GPRMAX_ROOT_DIR, 'requirements.txt')}"
f"pip install -r {os.path.join(GPRMAX_ROOT_DIR, 'requirements.txt')}",
]
executable = f"pip install -e {GPRMAX_ROOT_DIR}"
@@ -29,11 +30,13 @@ class CreatePyenvTest(rfm.RunOnlyRegressionTest):
Check packages successfully installed from requirements.txt
Check gprMax installed successfully and no other errors thrown
"""
return sn.assert_found(r"Successfully installed (?!gprMax)", self.stdout, "Failed to install requirements") \
and sn.assert_found(r"Successfully installed gprMax", self.stdout, "Failed to install gprMax") \
and sn.assert_not_found(r"finished with status 'error'", self.stdout) \
and sn.assert_not_found(r"ERROR:", self.stderr)
return (
sn.assert_found(r"Successfully installed (?!gprMax)", self.stdout, "Failed to install requirements")
and sn.assert_found(r"Successfully installed gprMax", self.stdout, "Failed to install gprMax")
and sn.assert_not_found(r"finished with status 'error'", self.stdout)
and sn.assert_not_found(r"ERROR:", self.stderr)
)
class GprmaxBaseTest(rfm.RunOnlyRegressionTest):
valid_systems = ["archer2:compute"]
@@ -56,44 +59,39 @@ class GprmaxBaseTest(rfm.RunOnlyRegressionTest):
"""Add prerun command to load the built Python environment""" """Add prerun command to load the built Python environment"""
path_to_pyenv = os.path.join(CreatePyenvTest(part="login").stagedir, PATH_TO_PYENV) path_to_pyenv = os.path.join(CreatePyenvTest(part="login").stagedir, PATH_TO_PYENV)
self.prerun_cmds.append(f"source {path_to_pyenv}") self.prerun_cmds.append(f"source {path_to_pyenv}")
@sanity_function @sanity_function
def test_simulation_complete(self): def test_simulation_complete(self):
"""Check simulation completed successfully""" """Check simulation completed successfully"""
# TODO: Check for correctness/regression rather than just completing # TODO: Check for correctness/regression rather than just completing
return sn.assert_found(r"=== Simulation completed in ", self.stdout) return sn.assert_found(r"=== Simulation completed in ", self.stdout)
@performance_function('s', perf_key='run_time') @performance_function("s", perf_key="run_time")
def extract_run_time(self): def extract_run_time(self):
"""Extract total runtime""" """Extract total runtime"""
return sn.extractsingle( return sn.extractsingle(r"real\s+(?P<run_time>\S+)", self.stderr, "run_time", float)
r'real\s+(?P<run_time>\S+)',
self.stderr, @performance_function("s", perf_key="simulation_time")
"run_time",
float
)
@performance_function('s', perf_key='simulation_time')
def extract_simulation_time(self): def extract_simulation_time(self):
"""Extract simulation time reported by gprMax""" """Extract simulation time reported by gprMax"""
# sn.extractall throws an error if a group has value None. # sn.extractall throws an error if a group has value None.
# Therefore have to handle the < 1 min and >= 1 min cases separately. # Therefore have to handle the < 1 min and >= 1 min cases separately.
if sn.extractsingle(r"=== Simulation completed in \S+ (?P<case>minute|seconds)", self.stdout, "case") == "minute": if (
sn.extractsingle(r"=== Simulation completed in \S+ (?P<case>minute|seconds)", self.stdout, "case")
== "minute"
):
simulation_time = sn.extractall( simulation_time = sn.extractall(
r"=== Simulation completed in (?P<minutes>\S+) minutes? and (?P<seconds>\S+) seconds =*", r"=== Simulation completed in (?P<minutes>\S+) minutes? and (?P<seconds>\S+) seconds =*",
self.stdout, self.stdout,
["minutes", "seconds"], ["minutes", "seconds"],
float float,
) )
minutes = simulation_time[0][0] minutes = simulation_time[0][0]
seconds = simulation_time[0][1] seconds = simulation_time[0][1]
else: else:
minutes = 0 minutes = 0
seconds = sn.extractsingle( seconds = sn.extractsingle(
r"=== Simulation completed in (?P<seconds>\S+) seconds =*", r"=== Simulation completed in (?P<seconds>\S+) seconds =*", self.stdout, "seconds", float
self.stdout,
"seconds",
float
) )
return minutes * 60 + seconds return minutes * 60 + seconds
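The comment above explains why the two completion-message formats are parsed separately: sn.extractall raises an error when a named group is missing, so the "X seconds" and "X minutes and Y seconds" messages from gprMax need separate regexes. A standalone sketch of that logic using plain re, with invented sample log lines (only the regexes are taken from the diff):

import re

samples = [
    "=== Simulation completed in 42.7 seconds =====================",
    "=== Simulation completed in 3 minutes and 12.5 seconds =======",
]

for line in samples:
    case = re.search(r"=== Simulation completed in \S+ (?P<case>minute|seconds)", line).group("case")
    if case == "minute":
        match = re.search(
            r"=== Simulation completed in (?P<minutes>\S+) minutes? and (?P<seconds>\S+) seconds =*", line
        )
        total = float(match.group("minutes")) * 60 + float(match.group("seconds"))
    else:
        match = re.search(r"=== Simulation completed in (?P<seconds>\S+) seconds =*", line)
        total = float(match.group("seconds"))
    print(total)  # 42.7, then 192.5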


@@ -1,158 +1,125 @@
site_configuration = {
'systems': [
{
'name': 'archer2',
'descr': 'ARCHER2',
'hostnames': ['uan','ln','dvn'],
'modules_system': 'lmod',
'partitions': [
{
'name': 'login',
'descr': 'Login nodes',
'scheduler': 'local',
'launcher': 'local',
'environs': ['PrgEnv-gnu','PrgEnv-cray','PrgEnv-aocc'],
},
{
'name': 'compute',
'descr': 'Compute nodes',
'scheduler': 'slurm',
'launcher': 'srun',
'access': ['--hint=nomultithread','--distribution=block:block','--partition=standard','--qos=standard'],
'environs': ['PrgEnv-gnu','PrgEnv-cray','PrgEnv-aocc'],
'max_jobs': 16,
}
]
}
],
'environments': [
{
'name': 'PrgEnv-gnu',
'modules': ['PrgEnv-gnu'],
'cc': 'cc',
'cxx': 'CC',
'ftn': 'ftn',
'target_systems': ['archer2']
},
{
'name': 'PrgEnv-cray',
'modules': ['PrgEnv-cray'],
'cc': 'cc',
'cxx': 'CC',
'ftn': 'ftn',
'target_systems': ['archer2']
},
{
'name': 'PrgEnv-aocc',
'modules': ['PrgEnv-aocc'],
'cc': 'cc',
'cxx': 'CC',
'ftn': 'ftn',
'target_systems': ['archer2']
},
],
'logging': [
{
'level': 'debug',
'handlers': [
{
'type': 'stream',
'name': 'stdout',
'level': 'info',
'format': '%(message)s'
},
{
'type': 'file',
'name': 'reframe.out',
'level': 'info',
'format': '[%(asctime)s] %(check_info)s: %(message)s',
'append': True
},
{
'type': 'file',
'name': 'reframe.log',
'level': 'debug',
'format': '[%(asctime)s] %(levelname)s %(levelno)s: %(check_info)s: %(message)s', # noqa: E501
'append': False
}
],
'handlers_perflog': [
{
'type': 'file',
'name': 'reframe_perf.out',
'level': 'info',
'format': '[%(asctime)s] %(check_info)s %(check_perfvalues)s',
'format_perfvars': '| %(check_perf_var)s: %(check_perf_value)s %(check_perf_unit)s (r: %(check_perf_ref)s l: %(check_perf_lower_thres)s u: %(check_perf_upper_thres)s) ',
'append': True
},
{
'type': 'filelog',
'prefix': '%(check_system)s/%(check_partition)s',
'level': 'info',
'format': (
'%(check_result)s, %(check_job_completion_time)s, '
'%(check_name)s, %(check_short_name)s, %(check_jobid)s, '
'%(check_num_tasks)s, %(check_num_cpus_per_task)s, %(check_num_tasks_per_node)s, '
'%(check_#ALL)s' # Any remaining loggable test attributes should be test parameters
),
'ignore_keys': [
'check_build_locally',
'check_build_time_limit',
'check_descr',
'check_display_name',
'check_env_vars',
'check_exclusive_access',
'check_executable',
'check_executable_opts',
'check_extra_resources',
'check_hashcode',
'check_job_completion_time_unix',
'check_job_exitcode',
'check_job_nodelist',
'check_job_submit_time',
'check_jobid',
'check_keep_files',
'check_local',
'check_maintainers',
'check_max_pending_time',
'check_modules',
'check_name',
'check_num_cpus_per_task',
'check_num_gpus_per_node',
'check_num_tasks',
'check_num_tasks_per_core',
'check_num_tasks_per_node',
'check_num_tasks_per_socket',
'check_outputdir',
'check_partition',
'check_prebuild_cmds',
'check_prefix',
'check_prerun_cmds',
'check_postbuild_cmds',
'check_postrun_cmds',
'check_readonly_files',
'check_short_name',
'check_sourcepath',
'check_sourcesdir',
'check_stagedir',
'check_strict_check',
'check_system',
'check_tags',
'check_time_limit',
'check_unique_name',
'check_use_multithreading',
'check_valid_prog_environs',
'check_valid_systems',
'check_variables'
],
'format_perfvars': (
'%(check_perf_value)s|%(check_perf_unit)s|'
'%(check_perf_ref)s|%(check_perf_lower_thres)s|'
'%(check_perf_upper_thres)s|'
),
'append': True
}
]
}
],
}
site_configuration = {
"systems": [
{
"name": "archer2",
"descr": "ARCHER2",
"hostnames": ["uan", "ln", "dvn"],
"modules_system": "lmod",
"partitions": [
{
"name": "login",
"descr": "Login nodes",
"scheduler": "local",
"launcher": "local",
"environs": ["PrgEnv-gnu", "PrgEnv-cray", "PrgEnv-aocc"],
},
{
"name": "compute",
"descr": "Compute nodes",
"scheduler": "slurm",
"launcher": "srun",
"access": [
"--hint=nomultithread",
"--distribution=block:block",
"--partition=standard",
"--qos=standard",
],
"environs": ["PrgEnv-gnu", "PrgEnv-cray", "PrgEnv-aocc"],
"max_jobs": 16,
},
],
}
],
"environments": [
{
"name": "PrgEnv-gnu",
"modules": ["PrgEnv-gnu"],
"cc": "cc",
"cxx": "CC",
"ftn": "ftn",
"target_systems": ["archer2"],
},
{
"name": "PrgEnv-cray",
"modules": ["PrgEnv-cray"],
"cc": "cc",
"cxx": "CC",
"ftn": "ftn",
"target_systems": ["archer2"],
},
{
"name": "PrgEnv-aocc",
"modules": ["PrgEnv-aocc"],
"cc": "cc",
"cxx": "CC",
"ftn": "ftn",
"target_systems": ["archer2"],
},
],
"logging": [
{
"level": "debug",
"handlers": [
{"type": "stream", "name": "stdout", "level": "info", "format": "%(message)s"},
{
"type": "file",
"name": "reframe.out",
"level": "info",
"format": "[%(asctime)s] %(check_info)s: %(message)s",
"append": True,
},
{
"type": "file",
"name": "reframe.log",
"level": "debug",
"format": "[%(asctime)s] %(levelname)s %(levelno)s: %(check_info)s: %(message)s", # noqa: E501
"append": False,
},
],
"handlers_perflog": [
{
"type": "file",
"name": "reframe_perf.out",
"level": "info",
"format": "[%(asctime)s] %(check_info)s job_id=%(check_jobid)s %(check_perfvalues)s",
"format_perfvars": "| %(check_perf_var)s: %(check_perf_value)s %(check_perf_unit)s (r: %(check_perf_ref)s l: %(check_perf_lower_thres)s u: %(check_perf_upper_thres)s) ",
"append": True,
},
{
"type": "filelog",
"prefix": "%(check_system)s/%(check_partition)s",
"level": "info",
"format": (
"%(check_result)s,%(check_job_completion_time)s,"
"%(check_info)s,%(check_jobid)s,"
"%(check_num_tasks)s,%(check_num_cpus_per_task)s,%(check_num_tasks_per_node)s,"
"%(check_perfvalues)s"
),
"format_perfvars": (
"%(check_perf_value)s,%(check_perf_unit)s,"
"%(check_perf_ref)s,%(check_perf_lower_thres)s,"
"%(check_perf_upper_thres)s,"
),
"append": True,
},
{
"type": "filelog",
"prefix": "%(check_system)s/%(check_partition)s/latest",
"level": "info",
"format": (
"%(check_result)s,%(check_job_completion_time)s,"
"%(check_info)s,%(check_jobid)s,"
"%(check_num_tasks)s,%(check_num_cpus_per_task)s,%(check_num_tasks_per_node)s,"
"%(check_perfvalues)s"
),
"format_perfvars": (
"%(check_perf_value)s,%(check_perf_unit)s,"
"%(check_perf_ref)s,%(check_perf_lower_thres)s,"
"%(check_perf_upper_thres)s,"
),
"append": False,
},
],
}
],
}

tests/reframe_benchmarks.py (new file, 38 lines)

@@ -0,0 +1,38 @@
import reframe as rfm
from reframe.core.builtins import parameter, run_after
from base_tests import GprmaxBaseTest
"""ReFrame tests for performance benchmarking
Usage:
cd gprMax/tests
reframe -C configuration/{CONFIG_FILE} -c reframe_benchmarks.py -c base_tests.py -r
"""
@rfm.simple_test
class BenchmarkTest(GprmaxBaseTest):
tags = {"benchmark", "single node", "openmp"}
num_tasks = 1
omp_threads = parameter([1, 2, 4, 8, 16, 32, 64, 128])
domain = parameter([0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
time_limit = "4h"
@run_after("init")
def setup_omp(self):
self.num_cpus_per_task = self.omp_threads
super().setup_omp()
@run_after("init")
def create_model_file(self):
input_file = f"benchmark_model_{self.domain}.in"
self.executable_opts = [input_file]
self.keep_files = [input_file]
@run_after("init")
def set_cpu_freq(self):
self.env_vars["SLURM_CPU_FREQ_REQ"] = 2250000


@@ -1,10 +1,8 @@
import reframe as rfm
from reframe.core.builtins import parameter
from base_tests import GprmaxBaseTest
from reframe.core.builtins import parameter, run_after
"""ReFrame tests for basic functionality
"""ReFrame tests for benchmarking and basic functionality
Usage:
cd gprMax/tests
@@ -14,6 +12,7 @@ from base_tests import GprmaxBaseTest
@rfm.simple_test
class BScanTest(GprmaxBaseTest):
tags = {"test", "mpi", "taskfarm"}
executable_opts = "cylinder_Bscan_2D.in -n 64 -mpi".split()
num_tasks = 8
@@ -22,18 +21,21 @@ class BScanTest(GprmaxBaseTest):
@rfm.simple_test
class BasicModelsTest(GprmaxBaseTest):
tags = {"test", "serial"}
# List of available basic test models
model = parameter([
"2D_ExHyHz",
"2D_EyHxHz",
"2D_EzHxHy",
"cylinder_Ascan_2D",
"hertzian_dipole_fs",
"hertzian_dipole_hs",
"hertzian_dipole_dispersive",
"magnetic_dipole_fs",
])
model = parameter(
[
"2D_ExHyHz",
"2D_EyHxHz",
"2D_EzHxHy",
"cylinder_Ascan_2D",
"hertzian_dipole_fs",
"hertzian_dipole_hs",
"hertzian_dipole_dispersive",
"magnetic_dipole_fs",
]
)
num_cpus_per_task = 16
@run_after("init")
@@ -43,27 +45,3 @@ class BasicModelsTest(GprmaxBaseTest):
self.executable_opts = [input_file, "-o", output_file]
self.postrun_cmds = [f"python -m toolboxes.Plotting.plot_Ascan -save {output_file}"]
self.keep_files = [input_file, output_file, f"{self.model}.pdf"]
@rfm.simple_test
class BenchmarkTest(GprmaxBaseTest):
num_tasks = 1
omp_threads = parameter([1, 2, 4, 8, 16, 32, 64, 128])
domain = parameter([0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
time_limit = "4h"
@run_after("init")
def setup_omp(self):
self.num_cpus_per_task = self.omp_threads
super().setup_omp()
@run_after("init")
def create_model_file(self):
input_file = f"benchmark_model_{self.domain}.in"
self.executable_opts = [input_file]
self.keep_files = [input_file]
@run_after("init")
def set_cpu_freq(self):
self.env_vars["SLURM_CPU_FREQ_REQ"] = 2250000


@@ -1,64 +0,0 @@
"""A series of models with different domain sizes used for benchmarking.
The domain is free space with a simple source (Hertzian Dipole) and
receiver at the centre.
"""
import os
from pathlib import Path
import pytest
import gprMax
# Cube side lengths (in metres) for different domains
DOMAINS = [0.10, 0.15, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80]
# Number of OpenMP threads to benchmark each domain size
OMP_THREADS = [1, 2, 4, 8, 16, 32, 64, 128]
# Discretisation
dl = 0.001
@pytest.mark.parametrize("domain", DOMAINS)
@pytest.mark.parametrize("omp_threads", OMP_THREADS)
def test_simple_benchmarks(request, benchmark, domain, omp_threads):
output_dir = Path(os.path.dirname(request.fspath), "tmp", request.node.name)
os.makedirs(output_dir, exist_ok=True)
output_filepath = output_dir / "model.h5"
# Domain
x = domain
y = x
z = x
scene = gprMax.Scene()
title = gprMax.Title(name=request.node.name)
domain = gprMax.Domain(p1=(x, y, z))
dxdydz = gprMax.Discretisation(p1=(dl, dl, dl))
time_window = gprMax.TimeWindow(time=3e-9)
wv = gprMax.Waveform(wave_type="gaussiandotnorm", amp=1, freq=900e6, id="MySource")
src = gprMax.HertzianDipole(p1=(x / 2, y / 2, z / 2), polarisation="x", waveform_id="MySource")
rx = gprMax.Rx(p1=(x / 4, y / 4, z / 4))
omp = gprMax.OMPThreads(n=omp_threads)
scenes = []
scene.add(title)
scene.add(domain)
scene.add(dxdydz)
scene.add(time_window)
scene.add(wv)
scene.add(src)
scene.add(omp)
scene.add(rx)
scenes.append(scene)
# Run benchmark once (i.e. 1 round)
benchmark.pedantic(gprMax.run, kwargs={'scenes': scenes, 'n': len(scenes), 'geometry_only': False, 'outputfile': output_filepath, 'gpu': None})
# Automatically choose number of rounds.
# benchmark(gprMax.run, scenes=scenes, n=len(scenes), geometry_only=False, outputfile=output_filepath, gpu=None)
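For reference, the deleted pytest benchmark drove gprMax entirely through its Python API, so a single benchmark point can still be reproduced outside pytest. A minimal sketch assembled from the calls above; the 0.3 m domain, 16 threads and the output file name are illustrative choices, not values fixed by the commit:

import gprMax

dl = 0.001       # discretisation (m)
x = y = z = 0.3  # cube side length (m)

scene = gprMax.Scene()
scene.add(gprMax.Title(name="benchmark_0.3m_16threads"))
scene.add(gprMax.Domain(p1=(x, y, z)))
scene.add(gprMax.Discretisation(p1=(dl, dl, dl)))
scene.add(gprMax.TimeWindow(time=3e-9))
scene.add(gprMax.Waveform(wave_type="gaussiandotnorm", amp=1, freq=900e6, id="MySource"))
scene.add(gprMax.HertzianDipole(p1=(x / 2, y / 2, z / 2), polarisation="x", waveform_id="MySource"))
scene.add(gprMax.Rx(p1=(x / 4, y / 4, z / 4)))
scene.add(gprMax.OMPThreads(n=16))

gprMax.run(scenes=[scene], n=1, geometry_only=False, outputfile="model.h5", gpu=None)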


@@ -1,187 +0,0 @@
# Copyright (C) 2015-2023: The University of Edinburgh, United Kingdom
# Authors: Craig Warren, Antonis Giannopoulos, and John Hartley
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import sys
from pathlib import Path
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pytest
import gprMax
from testing.analytical_solutions import hertzian_dipole_fs
from tests.utilities.data import get_data_from_h5_file, calculate_diffs
from tests.utilities.plotting import plot_dataset_comparison, plot_diffs
from gprMax.utilities.logging import logging_config
logger = logging.getLogger(__name__)
logging_config(name=__name__)
if sys.platform == "linux":
plt.switch_backend("agg")
"""Compare field outputs
Usage:
cd gprMax
pytest tests/test_models.py
"""
# Specify directory containing basic models to test
BASIC_MODELS_DIRECTORY = Path(__file__).parent / "data" / "models_basic"
# List of available basic test models
BASIC_MODELS = [
"2D_ExHyHz",
"2D_EyHxHz",
"2D_EzHxHy",
"cylinder_Ascan_2D",
"hertzian_dipole_fs",
"hertzian_dipole_hs",
"hertzian_dipole_dispersive",
"magnetic_dipole_fs",
]
# Specify directory containing analytical models to test
ANALYTICAL_MODELS_DIRECTORY = Path(__file__).parent / "data" / "models_analytical"
# List of available analytical models
ANALYTICAL_MODELS = ["hertzian_dipole_fs_analytical"]
FIELD_COMPONENTS_BASE_PATH = "/rxs/rx1/"
def create_ascan_comparison_plots(test_time, test_data, ref_time, ref_data, model_name, output_base):
fig1 = plot_dataset_comparison(test_time, test_data, ref_time, ref_data, model_name)
fig1.savefig(output_base.with_suffix(".png"), dpi=150, format="png", bbox_inches="tight", pad_inches=0.1)
# Required to correctly calculate diffs
assert test_time.shape == ref_time.shape
assert np.all(test_time == ref_time)
assert test_data.shape == ref_data.shape
data_diffs = calculate_diffs(test_data, ref_data)
fig2 = plot_diffs(test_time, data_diffs)
fig2.savefig(Path(f"{output_base}_diffs.png"), dpi=150, format="png", bbox_inches="tight", pad_inches=0.1)
logger.info(f"Output data folder: {output_base.parent}")
plt.close(fig1)
plt.close(fig2)
def run_test(model_name, input_base, data_directory, analytical_func=None, gpu=None, opencl=None):
input_filepath = input_base.with_suffix(".in")
reference_filepath = Path(f"{input_base}_ref.h5")
output_base = data_directory / model_name
output_filepath = output_base.with_suffix(".h5")
# Run model
gprMax.run(inputfile=input_filepath, outputfile=output_filepath, gpu=gpu, opencl=opencl)
test_time, test_data = get_data_from_h5_file(output_filepath)
if analytical_func is not None:
ref_time = test_time
ref_data = analytical_func(output_filepath)
else:
ref_time, ref_data = get_data_from_h5_file(reference_filepath)
create_ascan_comparison_plots(test_time, test_data, ref_time, ref_data, model_name, output_base)
data_diffs = calculate_diffs(test_data, ref_data)
max_diff = round(np.max(data_diffs), 2)
assert max_diff <= 0
def run_regression_test(request, ndarrays_regression, model_name, input_base, data_directory, gpu=None, opencl=None):
input_filepath = input_base.with_suffix(".in")
output_dir = Path(os.path.dirname(request.fspath), "tmp", request.node.name)
os.makedirs(output_dir, exist_ok=True)
output_base = output_dir / model_name
output_filepath = output_base.with_suffix(".h5")
data_base = data_directory / request.node.name
reference_filepath = data_base.with_suffix(".npz")
# Run model
gprMax.run(inputfile=input_filepath, outputfile=output_filepath, gpu=gpu, opencl=opencl)
test_time, test_data = get_data_from_h5_file(output_filepath)
# May not exist if first time running the regression test
if os.path.exists(reference_filepath):
reference_file = np.load(reference_filepath)
ref_time = reference_file["time"]
ref_data = reference_file["data"]
create_ascan_comparison_plots(test_time, test_data, ref_time, ref_data, model_name, output_base)
ndarrays_regression.check({"time": test_time, "data": test_data}, basename=os.path.relpath(data_base, data_directory))
def calc_hertzian_dipole_fs_analytical_solution(filepath):
with h5py.File(filepath, "r") as file:
# Tx/Rx position to feed to analytical solution
rx_pos = file[FIELD_COMPONENTS_BASE_PATH].attrs["Position"]
tx_pos = file["/srcs/src1/"].attrs["Position"]
rx_pos_relative = ((rx_pos[0] - tx_pos[0]), (rx_pos[1] - tx_pos[1]), (rx_pos[2] - tx_pos[2]))
# Analytical solution of a dipole in free space
data = hertzian_dipole_fs(
file.attrs["Iterations"], file.attrs["dt"], file.attrs["dx_dy_dz"], rx_pos_relative
)
return data
@pytest.mark.parametrize("model", BASIC_MODELS)
def test_basic_models(model, datadir):
base_filepath = Path(BASIC_MODELS_DIRECTORY, model, model)
run_test(model, base_filepath, datadir)
@pytest.mark.parametrize("model", ANALYTICAL_MODELS)
def test_analyitical_models(datadir, model):
base_filepath = Path(ANALYTICAL_MODELS_DIRECTORY, model)
run_test(model, base_filepath, datadir, analytical_func=calc_hertzian_dipole_fs_analytical_solution)
@pytest.mark.parametrize("model", BASIC_MODELS)
def test_basic_models_regression(request, ndarrays_regression, datadir, model):
base_filepath = Path(BASIC_MODELS_DIRECTORY, model, model)
run_regression_test(request, ndarrays_regression, model, base_filepath, datadir)
@pytest.mark.parametrize("model", ANALYTICAL_MODELS)
def test_analytical_models_regression(request, ndarrays_regression, datadir, model):
base_filepath = Path(ANALYTICAL_MODELS_DIRECTORY, model)
run_regression_test(request, ndarrays_regression, model, base_filepath, datadir)