Merge branch 'mpi' into 37-hdf5-geometry-views

This commit is contained in:
nmannall
2024-11-26 13:59:53 +00:00
Current commit 8ca45710ff
89 changed files with 778 additions and 363 deletions

View file

@@ -0,0 +1,11 @@
cpu_freq,model,num_cpus_per_task,num_nodes,num_tasks,num_tasks_per_node,run_time,simulation_time
2000000,benchmark_model_40,16,1,8,8,147.94,61.11
2000000,benchmark_model_40,16,16,128,8,74.45,16.03
2000000,benchmark_model_40,16,2,16,8,108.6,45.41
2000000,benchmark_model_40,16,4,32,8,92.18,35.0
2000000,benchmark_model_40,16,8,64,8,73.71,16.56
2250000,benchmark_model_40,16,1,8,8,171.95,53.94
2250000,benchmark_model_40,16,16,128,8,58.13,12.04
2250000,benchmark_model_40,16,2,16,8,97.73,38.73
2250000,benchmark_model_40,16,4,32,8,87.61,28.54
2250000,benchmark_model_40,16,8,64,8,68.29,14.47

View file

@@ -0,0 +1,17 @@
cpu_freq,model,mpi_tasks,num_cpus_per_task,num_tasks,num_tasks_per_node,run_time,simulation_time
2000000,benchmark_model_40,1,128,1,1,397.64,294.58
2000000,benchmark_model_40,128,1,128,128,129.22,68.77
2000000,benchmark_model_40,16,8,16,16,104.83,64.91
2000000,benchmark_model_40,2,64,2,2,192.89,151.06
2000000,benchmark_model_40,32,4,32,32,101.99,63.86
2000000,benchmark_model_40,4,32,4,4,119.14,80.95
2000000,benchmark_model_40,64,2,64,64,105.57,61.03
2000000,benchmark_model_40,8,16,8,8,102.26,58.24
2250000,benchmark_model_40,1,128,1,1,348.95,241.2
2250000,benchmark_model_40,128,1,128,128,118.04,66.21
2250000,benchmark_model_40,16,8,16,16,106.06,61.8
2250000,benchmark_model_40,2,64,2,2,189.82,140.84
2250000,benchmark_model_40,32,4,32,32,99.12,60.65
2250000,benchmark_model_40,4,32,4,4,117.36,76.9
2250000,benchmark_model_40,64,2,64,64,108.8,58.79
2250000,benchmark_model_40,8,16,8,8,94.92,55.32

View file

@@ -1,7 +1,12 @@
import reframe as rfm
import os
from pathlib import Path
import numpy as np
from primePy import primes
from reframe import simple_test
from reframe.core.builtins import parameter, run_after
from reframe_tests.tests.base_tests import GprMaxRegressionTest
from reframe_tests.tests.base_tests import GprMaxMPIRegressionTest, GprMaxRegressionTest
"""ReFrame tests for performance benchmarking
@@ -11,14 +16,43 @@ from reframe_tests.tests.base_tests import GprMaxRegressionTest
"""
@rfm.simple_test
def calculate_mpi_decomposition(number: int):
factors: list[int] = primes.factors(number)
if len(factors) < 3:
factors += [1] * (3 - len(factors))
elif len(factors) > 3:
base = factors[-3:]
factors = factors[:-3]
for factor in reversed(factors): # Use the largest factors first
min_index = np.argmin(base)
base[min_index] *= factor
factors = base
return sorted(factors)
@simple_test
class SingleNodeBenchmark(GprMaxRegressionTest):
tags = {"benchmark", "single node", "openmp"}
omp_threads = parameter([1, 2, 4, 8, 16, 32, 64, 128])
domain = parameter([0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
# domain = parameter([0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
cpu_freq = parameter([2000000, 2250000])
time_limit = "8h"
sourcesdir = "src"
model = parameter(
[
"benchmark_model_10",
"benchmark_model_15",
"benchmark_model_20",
"benchmark_model_30",
"benchmark_model_40",
"benchmark_model_50",
"benchmark_model_60",
"benchmark_model_70",
"benchmark_model_80",
]
)
@run_after("init")
def setup_env_vars(self):
@@ -26,8 +60,110 @@ class SingleNodeBenchmark(GprMaxRegressionTest):
self.env_vars["SLURM_CPU_FREQ_REQ"] = self.cpu_freq
super().setup_env_vars()
@run_after("init")
def set_model_file(self):
input_file = f"benchmark_model_{self.domain}.in"
self.executable_opts = [input_file]
self.keep_files = [input_file]
@simple_test
class SingleNodeMPIBenchmark(GprMaxRegressionTest):
tags = {"benchmark", "mpi", "openmp", "single node"}
mpi_tasks = parameter([1, 2, 4, 8, 16, 32, 64, 128, 256])
# domain = parameter([0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
cpu_freq = parameter([2000000, 2250000])
model = parameter(["benchmark_model_40"])
sourcesdir = "src"
time_limit = "1h"
@run_after("setup")
def setup_env_vars(self):
cpus_per_node = self.current_partition.processor.num_cpus
self.skip_if(
cpus_per_node < self.mpi_tasks,
f"Insufficient CPUs per node ({cpus_per_node}) to run test with at least {self.mpi_tasks} processors",
)
self.num_cpus_per_task = cpus_per_node // self.mpi_tasks
self.num_tasks = cpus_per_node // self.num_cpus_per_task
self.num_tasks_per_node = self.num_tasks
self.extra_executable_opts = [
"--mpi",
*map(str, calculate_mpi_decomposition(self.num_tasks)),
]
self.env_vars["SLURM_CPU_FREQ_REQ"] = self.cpu_freq
super().setup_env_vars()
@simple_test
class MPIStrongScalingBenchmark(GprMaxRegressionTest):
tags = {"benchmark", "mpi", "openmp"}
num_nodes = parameter([1, 2, 4, 8, 16])
# domain = parameter([0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
cpu_freq = parameter([2000000, 2250000])
time_limit = "8h"
sourcesdir = "src"
model = parameter(["benchmark_model_40"])
# serial_dependency = SingleNodeBenchmark
# mpi_layout = parameter([[1, 1, 1]]) # parameter([[2, 2, 2], [4, 4, 4], [6, 6, 6]])
def build_reference_filepath(self, suffix: str = "") -> str:
filename = (
f"MPIStrongScalingBenchmark_{suffix}" if len(suffix) > 0 else "MPIStrongScalingBenchmark"
)
reference_file = Path("regression_checks", filename).with_suffix(".h5")
return os.path.abspath(reference_file)
@run_after("setup")
def setup_env_vars(self):
cpus_per_node = self.current_partition.processor.num_cpus
self.num_cpus_per_task = 16
self.num_tasks_per_node = cpus_per_node // self.num_cpus_per_task
self.num_tasks = self.num_tasks_per_node * self.num_nodes
self.extra_executable_opts = [
"--mpi",
*map(str, calculate_mpi_decomposition(self.num_tasks)),
]
self.env_vars["SLURM_CPU_FREQ_REQ"] = self.cpu_freq
super().setup_env_vars()
@simple_test
class MPIWeakScalingBenchmark(GprMaxRegressionTest):
tags = {"benchmark", "mpi", "openmp"}
num_nodes = parameter([1, 2, 4, 8, 16])
# domain = parameter([0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
cpu_freq = parameter([2000000, 2250000])
time_limit = "8h"
sourcesdir = "src"
model = parameter(["benchmark_model_40"])
def build_reference_filepath(self, suffix: str = "") -> str:
filename = (
f"MPIWeakScalingBenchmark_{suffix}_{self.num_nodes}"
if len(suffix) > 0
else f"MPIWeakScalingBenchmark_{self.num_nodes}"
)
reference_file = Path("regression_checks", filename).with_suffix(".h5")
return os.path.abspath(reference_file)
@run_after("setup")
def setup_env_vars(self):
cpus_per_node = self.current_partition.processor.num_cpus
self.num_cpus_per_task = 16
self.num_tasks_per_node = cpus_per_node // self.num_cpus_per_task
self.num_tasks = self.num_tasks_per_node * self.num_nodes
size = 0.4
scale_factor = calculate_mpi_decomposition(self.num_nodes)
self.prerun_cmds.append(
f'sed -i "s/#domain: 0.4 0.4 0.4/#domain: {size * scale_factor[0]} {size * scale_factor[1]} {size * scale_factor[2]}/g" {self.model}.in'
)
self.extra_executable_opts = [
"--mpi",
*map(str, calculate_mpi_decomposition(self.num_tasks)),
]
self.env_vars["SLURM_CPU_FREQ_REQ"] = self.cpu_freq
super().setup_env_vars()

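For reference, the calculate_mpi_decomposition helper above balances the prime factors of the task count into three roughly equal dimensions for the --mpi flag. A minimal self-contained sketch of the same logic, with a plain trial-division factorizer standing in for primePy (that primes.factors returns the prime factors with multiplicity is an assumption):

import numpy as np

def prime_factors(n: int) -> list[int]:
    # Plain trial division; stands in for primePy's primes.factors().
    factors, divisor = [], 2
    while divisor * divisor <= n:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        factors.append(n)
    return factors

def calculate_mpi_decomposition(number: int) -> list[int]:
    # Same balancing logic as the benchmark file above.
    factors = prime_factors(number)
    if len(factors) < 3:
        factors += [1] * (3 - len(factors))
    elif len(factors) > 3:
        base = factors[-3:]
        factors = factors[:-3]
        for factor in reversed(factors):  # use the largest factors first
            base[int(np.argmin(base))] *= factor
        factors = base
    return sorted(factors)

# Decompositions for task counts used in the benchmarks above:
assert calculate_mpi_decomposition(8) == [2, 2, 2]
assert calculate_mpi_decomposition(16) == [2, 2, 4]
assert calculate_mpi_decomposition(128) == [4, 4, 8]  # one full ARCHER2 node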
View file

@@ -32,6 +32,11 @@ site_configuration = {
],
"environs": ["PrgEnv-gnu", "PrgEnv-cray", "PrgEnv-aocc"],
"max_jobs": 16,
"processor": {
"num_cpus": 128,
"num_cpus_per_socket": 64,
"num_sockets": 2,
},
},
],
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@@ -2,17 +2,15 @@
Usage (run all tests):
cd gprMax/reframe_tests
reframe -C configuraiton/{CONFIG_FILE} -c tests/ -r
reframe -C configuration/{CONFIG_FILE} -c tests/ -r
"""
import os
from pathlib import Path
from shutil import copyfile
from typing import Literal
from typing import Literal, Optional, Union
import reframe.utility.sanity as sn
import reframe.utility.typecheck as typ
from numpy import prod
from reframe import RunOnlyRegressionTest, simple_test
from reframe.core.builtins import (
parameter,
@@ -24,18 +22,24 @@ from reframe.core.builtins import (
sanity_function,
variable,
)
from reframe.utility import osext, udeps
from reframe.core.exceptions import DependencyError
from reframe.utility import udeps
from gprMax.receivers import Rx
from reframe_tests.tests.regression_checks import RegressionCheck
from reframe_tests.utilities.deferrable import path_join
TESTS_ROOT_DIR = Path(__file__).parent
GPRMAX_ROOT_DIR = Path(__file__).parent.parent.parent.resolve()
PATH_TO_PYENV = os.path.join(".venv", "bin", "activate")
@simple_test
class CreatePyenvTest(RunOnlyRegressionTest):
"""Create a fresh virtual environment for running the tests.
The test checks for any errors from pip installing gprMax and its
dependencies.
"""
valid_systems = ["generic", "archer2:login"]
valid_prog_environs = ["builtin", "PrgEnv-gnu"]
modules = ["cray-python"]
@@ -50,7 +54,7 @@ class CreatePyenvTest(RunOnlyRegressionTest):
@run_after("init")
def install_system_specific_dependencies(self):
"""Install additional dependencies for specific systems"""
"""Install additional dependencies for specific systems."""
if self.current_system.name == "archer2":
"""
Needed to prevent a pip install error.
@@ -74,9 +78,11 @@ class CreatePyenvTest(RunOnlyRegressionTest):
@sanity_function
def check_requirements_installed(self):
"""
Check packages successfully installed from requirements.txt
Check gprMax installed successfully and no other errors thrown
"""Check packages were successfully installed.
Check pip is up to date and gprMax dependencies from
requirements.txt were successfully installed. Check gprMax was
installed successfully and no other errors were thrown.
"""
return (
sn.assert_found(
@@ -97,7 +103,22 @@ class CreatePyenvTest(RunOnlyRegressionTest):
)
class GprMaxRegressionTest(RunOnlyRegressionTest):
class GprMaxBaseTest(RunOnlyRegressionTest):
"""Base class that all GprMax tests should inherit from.
Test functionality can be augmented by using Mixin classes.
Attributes:
model (parameter[str]): ReFrame parameter to specify the model
name(s).
sourcesdir (str): Relative path to the test's src directory.
regression_checks (list[RegressionCheck]): List of regression
checks to perform.
test_dependency (type[GprMaxBaseTest] | None): Optional test
dependency. If specified, regression checks will use
reference files created by the test dependency.
"""
valid_systems = ["archer2:compute"]
valid_prog_environs = ["PrgEnv-gnu"]
modules = ["cray-python"]
@@ -106,18 +127,110 @@ class GprMaxRegressionTest(RunOnlyRegressionTest):
exclusive_access = True
model = parameter()
is_antenna_model = variable(bool, value=False)
has_receiver_output = variable(bool, value=True)
snapshots = variable(typ.List[str], value=[])
sourcesdir = required
extra_executable_opts = variable(typ.List[str], value=[])
executable = "time -p python -m gprMax --log-level 10 --hide-progress-bars"
executable = "time -p python -m gprMax"
rx_outputs = variable(typ.List[str], value=Rx.defaultoutputs)
regression_checks = variable(typ.List[RegressionCheck], value=[])
# TODO: Make this a ReFrame variable
# Not currently possible as ReFrame does not think an object of type
# reframe.core.meta.RegressionTestMeta is copyable, and so ReFrame
# test classes cannot be specified in a variable.
test_dependency: Optional[type["GprMaxBaseTest"]] = None
# test_dependency = variable(type(None), type, value=None)
def get_test_dependency_variant_name(self, **kwargs) -> Optional[str]:
"""Get unique ReFrame name of the test dependency variant.
By default, filter test dependencies by the model name.
Args:
**kwargs: Additional key-value pairs to filter the parameter
space of the test dependency. The key is the test
parameter name and the value is either a single value or
a unary function that evaluates to True if the parameter
point must be kept, False otherwise.
Returns:
variant_name: Unique name of the test dependency variant.
"""
if self.test_dependency is None:
return None
# Always filter by the model parameter, but allow child classes
# (or mixins) to override how models are filtered.
kwargs.setdefault("model", self.model)
variant_nums = self.test_dependency.get_variant_nums(**kwargs)
if len(variant_nums) < 1:
raise DependencyError(
f"No variant of '{self.test_dependency.__name__}' meets conditions: {kwargs}",
)
return self.test_dependency.variant_name(variant_nums[0])
def get_test_dependency(self) -> Optional["GprMaxBaseTest"]:
"""Get correct ReFrame test case from the test dependency.
Returns:
test_case: ReFrame test case.
"""
variant = self.get_test_dependency_variant_name()
if variant is None:
return None
else:
return self.getdep(variant)
def build_reference_filepath(self, name: Union[str, os.PathLike]) -> Path:
"""Build path to the specified reference file.
Reference files are saved in directories per test case. If this
test does not specify a test dependency, it will save and manage
its own reference files in its own directory. Otherwise, it will
use reference files saved by its test dependency.
Args:
name: Name of the file.
Returns:
filepath: Absolute path to the reference file.
"""
target = self.get_test_dependency()
if target is None:
reference_dir = self.short_name
else:
reference_dir = target.short_name
reference_file = Path("regression_checks", reference_dir, name).with_suffix(".h5")
return reference_file.absolute()
# TODO: Change CreatePyenvTest to a fixture instead of a test dependency
@run_after("init")
def inject_dependencies(self):
"""Specify test dependencies.
All tests depend on the Python virtual environment building
correctly and their own test dependency if specified.
"""
self.depends_on("CreatePyenvTest", udeps.by_env)
if self.test_dependency is not None:
variant = self.get_test_dependency_variant_name()
self.depends_on(variant, udeps.by_env)
@require_deps
def get_pyenv_path(self, CreatePyenvTest):
"""Add prerun command to load the built Python environment."""
path_to_pyenv = os.path.join(CreatePyenvTest(part="login").stagedir, PATH_TO_PYENV)
self.prerun_cmds.append(f"source {path_to_pyenv}")
@run_after("init")
def setup_env_vars(self):
"""Set OMP_NUM_THREADS environment variable from num_cpus_per_task"""
"""Set necessary environment variables.
Set OMP_NUM_THREADS environment variable from num_cpus_per_task
and other system-specific variables.
"""
self.env_vars["OMP_NUM_THREADS"] = self.num_cpus_per_task
if self.current_system.name == "archer2":
@@ -128,47 +241,35 @@ class GprMaxRegressionTest(RunOnlyRegressionTest):
# Set the matplotlib cache to the work filesystem
self.env_vars["MPLCONFIGDIR"] = "${HOME/home/work}/.config/matplotlib"
# TODO: Change CreatePyenvTest to a fixture instead of a test dependency
@run_after("init")
def inject_dependencies(self):
"""Test depends on the Python virtual environment building correctly"""
self.depends_on("CreatePyenvTest", udeps.by_env)
def set_file_paths(self):
"""Set default test input and output files.
@require_deps
def get_pyenv_path(self, CreatePyenvTest):
"""Add prerun command to load the built Python environment"""
path_to_pyenv = os.path.join(CreatePyenvTest(part="login").stagedir, PATH_TO_PYENV)
self.prerun_cmds.append(f"source {path_to_pyenv}")
def build_reference_filepath(self, suffix: str = "") -> str:
filename = f"{self.short_name}_{suffix}" if len(suffix) > 0 else self.short_name
reference_file = Path("regression_checks", filename).with_suffix(".h5")
return os.path.abspath(reference_file)
def build_snapshot_filepath(self, snapshot: str) -> str:
return os.path.join(f"{self.model}_snaps", snapshot)
@run_after("setup")
def setup_reference_files(self):
"""Build reference file paths"""
self.reference_file = self.build_reference_filepath()
self.snapshot_reference_files = []
for snapshot in self.snapshots:
self.snapshot_reference_files.append(self.build_reference_filepath(snapshot))
@run_after("setup", always_last=True)
def configure_test_run(self, input_file_ext: str = ".in"):
"""Configure gprMax commandline arguments and plot outputs
Set the input and output files and add postrun commands to plot
the outputs.
These are set in a post-init hook to allow mixins to use them
later in the pipeline.
"""
self.input_file = f"{self.model}{input_file_ext}"
self.output_file = f"{self.model}.h5"
self.executable_opts = [self.input_file, "-o", self.output_file]
self.executable_opts += self.extra_executable_opts
self.keep_files = [self.input_file, *self.snapshots]
self.input_file = Path(f"{self.model}.in")
self.output_file = Path(f"{self.model}.h5")
@run_before("run")
def configure_test_run(self):
"""Configure gprMax commandline arguments and files to keep."""
input_file = str(self.input_file)
output_file = str(self.output_file)
self.executable_opts += [
input_file,
"-o",
output_file,
"--log-level",
"10",
"--hide-progress-bars",
]
regression_output_files = [str(r.output_file) for r in self.regression_checks]
self.keep_files += [input_file, output_file, *regression_output_files]
"""
if self.has_receiver_output:
self.postrun_cmds = [
f"python -m reframe_tests.utilities.plotting {self.output_file} {self.reference_file} -m {self.model}"
@@ -186,10 +287,11 @@ class GprMaxRegressionTest(RunOnlyRegressionTest):
antenna_t1_params,
antenna_ant_params,
]
"""
@run_before("run")
def combine_task_outputs(self):
"""Split output from each MPI rank
"""Split output from each MPI rank.
If running with multiple MPI ranks, split the output into
separate files and add postrun commands to combine the files
@@ -208,23 +310,17 @@ class GprMaxRegressionTest(RunOnlyRegressionTest):
self.postrun_cmds.append(f"cat out/{stdout}_*.out >> {self.stdout}")
self.postrun_cmds.append(f"cat err/{stderr}_*.err >> {self.stderr}")
# @run_before("run")
# def check_input_file_exists(self):
# """Skip test if input file does not exist"""
# # Current working directory will be where the reframe job was launched
# # However reframe assumes the source directory is relative to the test file
# with osext.change_dir(TESTS_ROOT_DIR):
# self.skip_if(
# not os.path.exists(self.sourcesdir),
# f"Source directory '{self.sourcesdir}' does not exist. Current working directory: '{os.getcwd()}'",
# )
# self.skip_if(
# not os.path.exists(os.path.join(self.sourcesdir, self.input_file)),
# f"Input file '{self.input_file}' not present in source directory '{self.sourcesdir}'",
# )
def test_simulation_complete(self) -> Literal[True]:
"""Check simulation completed successfully"""
"""Check simulation completed successfully.
Returns:
simulation_completed: Returns True if the simulation
completed, otherwise it fails the test.
Raises:
reframe.core.exceptions.SanityError: If the simulation did
not complete.
"""
return sn.assert_not_found(
r"(?i)error",
self.stderr,
@@ -233,110 +329,73 @@ class GprMaxRegressionTest(RunOnlyRegressionTest):
r"=== Simulation completed in ", self.stdout, "Simulation did not complete"
)
def run_regression_check(
self, output_file: str, reference_file: str, error_msg: str
) -> Literal[True]:
"""Compare two provided .h5 files using h5diff
def test_reference_files_exist(self) -> Literal[True]:
"""Check all reference files exist and create any missing ones.
Args:
output_file: Filepath of .h5 file output by the test.
reference_file: Filepath of reference .h5 file containing
the expected output.
Returns:
files_exist: Returns True if all reference files exist,
otherwise it fails the test.
Raises:
reframe.core.exceptions.SanityError: If any reference files
do not exist.
"""
if self.current_system.name == "archer2":
h5diff = "/opt/cray/pe/hdf5/default/bin/h5diff"
else:
h5diff = "h5diff"
h5diff_output = osext.run_command([h5diff, os.path.abspath(output_file), reference_file])
return sn.assert_false(
h5diff_output.stdout,
(
f"{error_msg}\n"
f"For more details run: 'h5diff {os.path.abspath(output_file)} {reference_file}'\n"
f"To re-create regression file, delete '{reference_file}' and rerun the test."
),
)
def test_output_regression(self) -> Literal[True]:
"""Compare the test output with the reference file.
If the test contains any receivers, a regression check is run,
otherwise it checks the test did not generate an output file.
"""
if self.has_receiver_output:
return self.run_regression_check(
self.output_file, self.reference_file, "Failed output regression check"
)
else:
return sn.assert_false(
sn.path_exists(self.output_file),
f"Unexpected output file found: '{self.output_file}'",
)
def test_snapshot_regression(self) -> Literal[True]:
"""Compare the snapshot outputs with reference files.
Generates a regression check for each snapshot. Each regression
check is a deferred expression, so they all need to be returned
so that they are each evaluated.
"""
regression_checks = []
for index, snapshot in enumerate(self.snapshots):
snapshot_path = self.build_snapshot_filepath(snapshot)
reference_file = self.snapshot_reference_files[index]
regression_checks.append(
self.run_regression_check(
snapshot_path, reference_file, f"Failed snapshot regression check '{snapshot}'"
)
)
# sn.assert_true is not strictly necessary
return sn.assert_true(sn.all(regression_checks))
# Store error messages so all reference files can be checked
# (and created if necessary) before the test is failed.
error_messages = []
for check in self.regression_checks:
if not check.reference_file_exists():
if self.test_dependency is None and check.create_reference_file():
error_messages.append(
f"Reference file does not exist. Creating... '{check.reference_file}'"
)
elif self.test_dependency is not None:
error_messages.append(
f"ERROR: Test dependency did not create reference file: '{check.reference_file}'"
)
else:
error_messages.append(
f"ERROR: Unable to create reference file: '{check.reference_file}'"
)
return sn.assert_true(len(error_messages) < 1, "\n".join(error_messages))
@sanity_function
def regression_check(self) -> Literal[True]:
"""Perform regression check for the test output and snapshots
def regression_check(self) -> bool:
"""Run sanity checks and regression checks.
If not all the reference files exist, then create all the
missing reference files from the test output and fail the test.
Checks will run in the following order:
- Check the simulation completed.
- Check all reference files exist.
- Run all regression checks.
If any of these checks fail, the test will fail and none of the
other later checks will run.
Returns:
test_passed: Returns True if all checks pass.
Raises:
reframe.core.exceptions.SanityError: If any regression
checks fail.
"""
if (not self.has_receiver_output or sn.path_exists(self.reference_file)) and sn.all(
[sn.path_exists(path) for path in self.snapshot_reference_files]
):
return (
self.test_simulation_complete()
and self.test_output_regression()
and self.test_snapshot_regression()
)
else:
error_messages = []
if self.has_receiver_output and not sn.path_exists(self.reference_file):
copyfile(self.output_file, self.reference_file)
error_messages.append(
f"Output reference file does not exist. Creating... '{self.reference_file}'"
)
for index, snapshot in enumerate(self.snapshots):
reference_file = self.snapshot_reference_files[index]
if not sn.path_exists(reference_file):
copyfile(self.build_snapshot_filepath(snapshot), reference_file)
error_messages.append(
f"Snapshot '{snapshot}' reference file does not exist. Creating... '{reference_file}'"
)
return sn.assert_true(False, "\n".join(error_messages))
return (
self.test_simulation_complete()
and self.test_reference_files_exist()
and sn.all(sn.map(lambda check: check.run(), self.regression_checks))
)
@performance_function("s", perf_key="run_time")
def extract_run_time(self):
"""Extract total runtime from the last task to complete"""
"""Extract total runtime from the last task to complete."""
return sn.extractsingle(
r"real\s+(?P<run_time>\S+)", self.stderr, "run_time", float, self.num_tasks - 1
)
@performance_function("s", perf_key="simulation_time")
def extract_simulation_time(self):
"""Extract simulation time reported by gprMax"""
"""Extract simulation time reported by gprMax."""
# sn.extractall throws an error if a group has value None.
# Therefore have to handle the < 1 min, >= 1 min and >= 1 hour cases separately.
@@ -375,115 +434,3 @@ class GprMaxRegressionTest(RunOnlyRegressionTest):
float,
)
return hours * 3600 + minutes * 60 + seconds
class GprMaxAPIRegressionTest(GprMaxRegressionTest):
executable = "time -p python"
@run_after("setup", always_last=True)
def configure_test_run(self):
"""Input files for API tests will be python files"""
super().configure_test_run(input_file_ext=".py")
class GprMaxBScanRegressionTest(GprMaxRegressionTest):
num_models = parameter()
@run_after("setup", always_last=True)
def configure_test_run(self):
"""Add B-Scan specific commandline arguments and postrun cmds"""
self.extra_executable_opts += ["-n", str(self.num_models)]
super().configure_test_run()
# Override postrun_cmds
# Merge output files and create B-Scan plot
self.postrun_cmds = [
f"python -m toolboxes.Utilities.outputfiles_merge {self.model}",
f"mv {self.model}_merged.h5 {self.output_file}",
f"python -m toolboxes.Plotting.plot_Bscan -save {self.output_file} Ez",
]
class GprMaxTaskfarmRegressionTest(GprMaxBScanRegressionTest):
serial_dependency: type[GprMaxRegressionTest]
extra_executable_opts = ["-taskfarm"]
sourcesdir = "src" # Necessary so test is not skipped (set later)
num_tasks = required
def _get_variant(self) -> str:
"""Get test variant with the same model and number of models"""
variant = self.serial_dependency.get_variant_nums(
model=lambda m: m == self.model, num_models=lambda n: n == self.num_models
)
return self.serial_dependency.variant_name(variant[0])
@run_after("init")
def inject_dependencies(self):
"""Test depends on the serial version of the test"""
self.depends_on(self._get_variant(), udeps.by_env)
super().inject_dependencies()
@run_after("init")
def set_variables_from_serial_dependency(self):
"""Set test dependencies to the same as the serial test"""
self.sourcesdir = str(self.serial_dependency.sourcesdir)
self.has_receiver_output = bool(self.serial_dependency.has_receiver_output)
self.snapshots = list(self.serial_dependency.snapshots)
@run_after("setup")
def setup_reference_files(self):
"""
Set the reference file regression checks to the output of the
serial test
"""
target = self.getdep(self._get_variant())
self.reference_file = os.path.join(target.stagedir, target.output_file)
self.snapshot_reference_files = target.snapshot_reference_files
class GprMaxMPIRegressionTest(GprMaxRegressionTest):
# TODO: Make this a variable
serial_dependency: type[GprMaxRegressionTest]
mpi_layout = parameter()
sourcesdir = "src" # Necessary so test is not skipped (set later)
@run_after("setup", always_last=True)
def configure_test_run(self):
"""Add MPI specific commandline arguments"""
self.num_tasks = int(prod(self.mpi_layout))
self.extra_executable_opts = ["--mpi", *map(str, self.mpi_layout)]
super().configure_test_run()
def _get_variant(self) -> str:
"""Get test variant with the same model"""
# TODO: Refactor tests to work with benchmarks
variant = self.serial_dependency.get_variant_nums(
model=lambda m: m == self.model,
# cpu_freq=lambda f: f == self.cpu_freq,
# omp_threads=lambda o: o == 16,
)
return self.serial_dependency.variant_name(variant[0])
@run_after("init")
def inject_dependencies(self):
"""Test depends on the specified serial test"""
self.depends_on(self._get_variant(), udeps.by_env)
super().inject_dependencies()
@run_after("init")
def set_variables_from_serial_dependency(self):
"""Set test dependencies to the same as the serial test"""
self.sourcesdir = str(self.serial_dependency.sourcesdir)
self.has_receiver_output = bool(self.serial_dependency.has_receiver_output)
self.snapshots = list(self.serial_dependency.snapshots)
@run_after("setup")
def setup_reference_files(self):
"""
Set the reference file regression checks to the output of the
serial test
"""
target = self.getdep(self._get_variant())
self.reference_file = os.path.join(target.stagedir, target.output_file)
self.snapshot_reference_files = target.snapshot_reference_files

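For orientation: the test_dependency mechanism added above replaces the serial_dependency pattern of the removed GprMaxTaskfarmRegressionTest and GprMaxMPIRegressionTest classes. A hedged sketch of how a dependent MPI test is now wired up (TestFoo, its sourcesdir, and its model name are hypothetical):

from reframe.core.builtins import parameter
from reframe_tests.tests.mixins import MpiMixin
from reframe_tests.tests.standard_tests import GprMaxRegressionTest

class TestFoo(GprMaxRegressionTest):
    sourcesdir = "src/foo_tests"      # hypothetical source directory
    model = parameter(["foo_model"])  # hypothetical model name

class TestFooMpi(MpiMixin, TestFoo):
    mpi_layout = parameter([[2, 2, 2]])
    test_dependency = TestFoo
    # inject_dependencies() resolves the TestFoo variant whose model
    # parameter matches (via get_test_dependency_variant_name() and
    # get_variant_nums()) and registers it with depends_on().
    # build_reference_filepath() then resolves reference files in the
    # dependency's regression_checks/<short_name>/ directory, so the
    # MPI run is checked against output the serial run produced.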
View file

@@ -0,0 +1,166 @@
from pathlib import Path
from typing import Optional
import reframe.utility.typecheck as typ
from numpy import prod
from reframe import RegressionMixin
from reframe.core.builtins import parameter, required, run_after, variable
from typing_extensions import TYPE_CHECKING
from reframe_tests.tests.base_tests import GprMaxBaseTest
from reframe_tests.tests.regression_checks import (
ReceiverRegressionCheck,
RegressionCheck,
SnapshotRegressionCheck,
)
# If using a static type checker, inherit from GprMaxBaseTest as the
# Mixin classes should always have access to resources from that class.
# However, during execution inherit from RegressionMixin.
if TYPE_CHECKING:
GprMaxMixin = GprMaxBaseTest
else:
GprMaxMixin = RegressionMixin
class ReceiverMixin(GprMaxMixin):
number_of_receivers = variable(int, value=-1)
@run_after("setup")
def add_receiver_regression_checks(self):
reference_file = self.build_reference_filepath(self.output_file)
if self.number_of_receivers > 0:
for i in range(self.number_of_receivers):
regression_check = ReceiverRegressionCheck(
self.output_file, reference_file, f"r{i}"
)
self.regression_checks.append(regression_check)
else:
regression_check = RegressionCheck(self.output_file, reference_file)
self.regression_checks.append(regression_check)
class SnapshotMixin(GprMaxMixin):
"""Add regression tests for snapshots.
Attributes:
snapshots (list[str]): List of snapshots to run regression
checks on.
"""
snapshots = variable(typ.List[str], value=[])
def build_snapshot_filepath(self, snapshot: str) -> Path:
"""Build filepath to the specified snapshot.
Args:
snapshot: Name of the snapshot.
"""
return Path(f"{self.model}_snaps", snapshot).with_suffix(".h5")
@run_after("setup")
def add_snapshot_regression_checks(self):
"""Add a regression check for each snapshot.
The test will be skipped if no snapshots have been specified.
"""
self.skip_if(
len(self.snapshots) == 0,
"Must provide a list of snapshots.",
)
for snapshot in self.snapshots:
snapshot_file = self.build_snapshot_filepath(snapshot)
reference_file = self.build_reference_filepath(snapshot)
regression_check = SnapshotRegressionCheck(snapshot_file, reference_file)
self.regression_checks.append(regression_check)
class PythonApiMixin(GprMaxMixin):
"""Use the GprMax Python API rather than a standard input file."""
@run_after("setup")
def use_python_input_file(self):
"""Input files for API tests will be python files."""
self.executable = "time -p python"
self.input_file = self.input_file.with_suffix(".py")
class MpiMixin(GprMaxMixin):
"""Run test using GprMax MPI functionality.
Attributes:
mpi_layout (parameter[list[int]]): ReFrame parameter to specify
how MPI tasks should be arranged.
"""
mpi_layout = parameter()
@run_after("setup")
def configure_mpi_tasks(self):
"""Set num_tasks and add MPI specific commandline arguments."""
self.num_tasks = int(prod(self.mpi_layout))
self.executable_opts += ["--mpi", *map(str, self.mpi_layout)]
class BScanMixin(GprMaxMixin):
"""Test a B-scan model - a model with a moving source and receiver.
Attributes:
num_models (parameter[int]): Number of models to run.
"""
num_models = parameter()
@run_after("setup")
def setup_bscan_test(self):
"""Add B-scan specific commandline arguments and postrun cmds.
Set the number of models to run, and merge the output files.
"""
self.executable_opts += ["-n", str(self.num_models)]
self.postrun_cmds += [
f"python -m toolboxes.Utilities.outputfiles_merge {self.model}",
f"mv {self.model}_merged.h5 {self.output_file}",
]
def get_test_dependency_variant_name(self, **kwargs) -> Optional[str]:
"""Get unique ReFrame name of the test dependency variant.
By default, filter test dependencies by the model name and the
number of models.
Args:
**kwargs: Additional key-value pairs to filter the parameter
space of the test dependency. The key is the test
parameter name and the value is either a single value or
a unary function that evaluates to True if the parameter
point must be kept, False otherwise.
Returns:
variant_name: Unique name of the test dependency variant.
"""
kwargs.setdefault("num_models", self.num_models)
return super().get_test_dependency_variant_name(**kwargs)
class TaskfarmMixin(GprMaxMixin):
"""Run test using GprMax taskfarm functionality."""
# TODO: Make this a required variable, or create a new variable to
# proxy it.
# num_tasks = required
@run_after("setup")
def add_taskfarm_flag(self):
"""Add taskfarm specific commandline arguments."""
self.executable_opts += ["--taskfarm"]
class AntennaModelMixin(GprMaxMixin):
"""Test an antenna model."""
pass

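The GprMaxMixin alias at the top of this file is worth a second look: it gives the mixins the attributes of GprMaxBaseTest for the type checker while avoiding inheriting the full test class at runtime. A minimal standalone sketch of the same trick, with hypothetical Base/FeatureMixin names:

from typing import TYPE_CHECKING

class Base:
    def resource(self) -> str:
        return "from Base"

class Plain:  # stand-in for reframe.RegressionMixin
    pass

if TYPE_CHECKING:
    MixinBase = Base  # the type checker sees Base's attributes
else:
    MixinBase = Plain  # at runtime, avoid inheriting Base twice

class FeatureMixin(MixinBase):
    def use_resource(self) -> str:
        # Statically this resolves via Base; at runtime it resolves
        # through the MRO of the concrete class below.
        return self.resource()

class Concrete(FeatureMixin, Base):
    pass

assert Concrete().use_resource() == "from Base"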
View file

@@ -0,0 +1,137 @@
from os import PathLike
from pathlib import Path
from shutil import copyfile
from typing import Literal, Optional, Union
import reframe.utility.sanity as sn
from reframe.core.runtime import runtime
from reframe.utility import osext
class RegressionCheck:
"""Compare two hdf5 files using h5diff"""
def __init__(
self, output_file: Union[str, PathLike], reference_file: Union[str, PathLike]
) -> None:
"""Create a new regression check.
Args:
output_file: Path to output file generated by the test.
reference_file: Path to reference file to run the regression
check against.
"""
self.output_file = Path(output_file)
self.reference_file = Path(reference_file)
self.h5diff_options: list[str] = []
# h5diff expects object paths after the two filenames, so keep
# them separate from ordinary options.
self.h5diff_objects: list[str] = []
@property
def error_msg(self) -> str:
"""Message to display if the regression check fails"""
return "Failed regression check"
def create_reference_file(self) -> bool:
"""Create reference file if it does not already exist.
The reference file is created as a copy of the current output
file.
Returns:
file_created: Returns True if a new file was created, False
if the path already exists.
"""
if not sn.path_exists(self.reference_file):
self.reference_file.parent.mkdir(parents=True, exist_ok=True)
copyfile(self.output_file, self.reference_file)
return True
else:
return False
def reference_file_exists(self) -> bool:
"""Check if the reference file exists.
Returns:
file_exists: Returns true if the reference filepath is a
regular file, False otherwise.
"""
return sn.path_isfile(self.reference_file)
def run(self) -> Literal[True]:
"""Run the regression check using h5diff.
Returns:
check_passed: Returns True if the output file matches the
reference file (i.e. no output from h5diff). Otherwise,
raises a SanityError.
Raises:
reframe.core.exceptions.SanityError: If the output file does
not exist, or the regression check fails.
"""
if runtime().system.name == "archer2":
h5diff = "/opt/cray/pe/hdf5/default/bin/h5diff"
else:
h5diff = "h5diff"
h5diff_output = osext.run_command(
[h5diff, *self.h5diff_options, str(self.output_file), str(self.reference_file), *self.h5diff_objects]
)
return sn.assert_true(
sn.path_isfile(self.output_file),
f"Expected output file '{self.output_file}' does not exist",
) and sn.assert_false(
h5diff_output.stdout,
(
f"{self.error_msg}\n"
f"For more details run: '{' '.join(h5diff_output.args)}'\n"
f"To re-create regression file, delete '{self.reference_file}' and rerun the test."
),
)
class ReceiverRegressionCheck(RegressionCheck):
"""Run regression check on individual reveivers in output files.
This can include arbitrary receivers in each file, or two receivers
in the same file.
"""
def __init__(
self,
output_file: Union[str, PathLike],
reference_file: Union[str, PathLike],
output_receiver: Optional[str],
reference_receiver: Optional[str] = None,
) -> None:
"""Create a new receiver regression check.
Args:
output_file: Path to output file generated by the test.
reference_file: Path to reference file to run the regression
check against.
output_receiver: Output receiver to check.
reference_receiver: Optional receiver to check against in
the reference file. If None, this will be the same as
the output receiver.
"""
super().__init__(output_file, reference_file)
self.output_receiver = output_receiver
self.reference_receiver = reference_receiver
self.h5diff_options.append(f"rxs/{self.output_receiver}")
if self.reference_receiver is not None:
self.h5diff_options.append(f"rxs/{self.reference_receiver}")
@property
def error_msg(self) -> str:
return f"Receiver '{self.output_receiver}' failed regression check"
class SnapshotRegressionCheck(RegressionCheck):
"""Run regression check on a gprMax Snapshot."""
@property
def error_msg(self) -> str:
return f"Snapshot '{self.output_file.name}' failed regression check "

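As a usage illustration, the checks are plain objects that a test accumulates in regression_checks and evaluates in its sanity function. A minimal sketch, assuming a model.h5 output and the per-test reference directory layout that base_tests builds (the test name TestFoo is hypothetical):

from pathlib import Path
from reframe_tests.tests.regression_checks import ReceiverRegressionCheck

# Compare receiver rxs/r0 in the test output against the same receiver
# in the stored reference file (reference_receiver defaults to None).
check = ReceiverRegressionCheck(
    output_file=Path("model.h5"),
    reference_file=Path("regression_checks", "TestFoo", "model.h5"),
    output_receiver="r0",
)

if not check.reference_file_exists():
    # First run: seed the reference from the current output; the test
    # then fails, mirroring test_reference_files_exist() above.
    check.create_reference_file()
else:
    # Builds the deferred h5diff assertion; evaluating it raises a
    # SanityError if the receivers differ.
    result = check.run()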
View file

@@ -0,0 +1,10 @@
from reframe_tests.tests.base_tests import GprMaxBaseTest
from reframe_tests.tests.mixins import ReceiverMixin, SnapshotMixin
class GprMaxRegressionTest(ReceiverMixin, GprMaxBaseTest):
pass
class GprMaxSnapshotTest(SnapshotMixin, GprMaxBaseTest):
pass

View file

@@ -1,7 +1,8 @@
import reframe as rfm
from reframe.core.builtins import parameter
from reframe_tests.tests.base_tests import GprMaxMPIRegressionTest, GprMaxRegressionTest
from reframe_tests.tests.mixins import MpiMixin
from reframe_tests.tests.standard_tests import GprMaxRegressionTest
"""Reframe regression tests for 2D models (TMx, TMy, and TMz)
"""
@@ -33,24 +34,21 @@ class Test2DModelYZ(GprMaxRegressionTest):
@rfm.simple_test
class Test2DModelXYMpi(GprMaxMPIRegressionTest):
class Test2DModelXYMpi(MpiMixin, Test2DModelXY):
tags = {"test", "mpi", "2d", "waveform", "hertzian_dipole"}
mpi_layout = parameter([[4, 4, 1]])
serial_dependency = Test2DModelXY
model = serial_dependency.model
test_dependency = Test2DModelXY
@rfm.simple_test
class Test2DModelXZMpi(GprMaxMPIRegressionTest):
class Test2DModelXZMpi(MpiMixin, Test2DModelXZ):
tags = {"test", "mpi", "2d", "waveform", "hertzian_dipole"}
mpi_layout = parameter([[4, 1, 4]])
serial_dependency = Test2DModelXZ
model = serial_dependency.model
test_dependency = Test2DModelXZ
@rfm.simple_test
class Test2DModelYZMpi(GprMaxMPIRegressionTest):
class Test2DModelYZMpi(MpiMixin, Test2DModelYZ):
tags = {"test", "mpi", "2d", "waveform", "hertzian_dipole"}
mpi_layout = parameter([[1, 4, 4]])
serial_dependency = Test2DModelYZ
model = serial_dependency.model
test_dependency = Test2DModelYZ

View file

@@ -1,7 +1,8 @@
import reframe as rfm
from reframe.core.builtins import parameter
from reframe_tests.tests.base_tests import GprMaxBScanRegressionTest, GprMaxRegressionTest
from reframe_tests.tests.mixins import BScanMixin
from reframe_tests.tests.standard_tests import GprMaxRegressionTest
"""Reframe regression tests for example models in gprMax documentation
"""
@@ -25,7 +26,7 @@ class TestAscan(GprMaxRegressionTest):
@rfm.simple_test
class TestBscan(GprMaxBScanRegressionTest):
class TestBscan(BScanMixin, GprMaxRegressionTest):
tags = {
"test",
"serial",

View file

@@ -1,7 +1,8 @@
import reframe as rfm
from reframe.core.builtins import parameter, run_before
from reframe_tests.tests.base_tests import GprMaxMPIRegressionTest, GprMaxRegressionTest
from reframe_tests.tests.mixins import AntennaModelMixin, MpiMixin
from reframe_tests.tests.standard_tests import GprMaxRegressionTest
"""Reframe regression tests for models defining geometry
"""
@@ -34,11 +35,10 @@ class TestBoxGeometryNoPml(GprMaxRegressionTest):
@rfm.simple_test
class TestEdgeGeometry(GprMaxRegressionTest):
class TestEdgeGeometry(AntennaModelMixin, GprMaxRegressionTest):
tags = {"test", "serial", "geometry", "edge", "transmission_line", "waveform", "antenna"}
sourcesdir = "src/geometry_tests/edge_geometry"
model = parameter(["antenna_wire_dipole_fs"])
is_antenna_model = True
"""Test MPI Functionality
@@ -46,29 +46,21 @@ class TestEdgeGeometry(GprMaxRegressionTest):
@rfm.simple_test
class TestBoxGeometryDefaultPmlMpi(GprMaxMPIRegressionTest):
class TestBoxGeometryDefaultPmlMpi(MpiMixin, TestBoxGeometryDefaultPml):
tags = {"test", "mpi", "geometery", "box"}
mpi_layout = parameter([[2, 2, 2], [3, 3, 3], [4, 4, 4]])
serial_dependency = TestBoxGeometryDefaultPml
model = serial_dependency.model
test_dependency = TestBoxGeometryDefaultPml
@rfm.simple_test
class TestBoxGeometryNoPmlMpi(GprMaxMPIRegressionTest):
class TestBoxGeometryNoPmlMpi(MpiMixin, TestBoxGeometryNoPml):
tags = {"test", "mpi", "geometery", "box"}
mpi_layout = parameter([[2, 2, 2], [3, 3, 3], [4, 4, 4]])
serial_dependency = TestBoxGeometryNoPml
model = serial_dependency.model
@run_before("run")
def add_gprmax_commands(self):
self.prerun_cmds.append(f"echo '#pml_cells: 0' >> {self.input_file}")
test_dependency = TestBoxGeometryNoPml
@rfm.simple_test
class TestEdgeGeometryMpi(GprMaxMPIRegressionTest):
class TestEdgeGeometryMpi(MpiMixin, TestEdgeGeometry):
tags = {"test", "mpi", "geometry", "edge", "transmission_line", "waveform", "antenna"}
mpi_layout = parameter([[3, 3, 3]])
serial_dependency = TestEdgeGeometry
model = serial_dependency.model
is_antenna_model = True
test_dependency = TestEdgeGeometry

View file

@@ -1,7 +1,8 @@
import reframe as rfm
from reframe.core.builtins import parameter
from reframe_tests.tests.base_tests import GprMaxMPIRegressionTest, GprMaxRegressionTest
from reframe_tests.tests.mixins import MpiMixin
from reframe_tests.tests.standard_tests import GprMaxRegressionTest
"""Reframe regression tests for each gprMax source
"""
@@ -19,8 +20,7 @@ class TestDispersiveMaterials(GprMaxRegressionTest):
@rfm.simple_test
class TestDispersiveMaterialsMpi(GprMaxMPIRegressionTest):
class TestDispersiveMaterialsMpi(MpiMixin, TestDispersiveMaterials):
tags = {"test", "mpi", "hertzian_dipole", "waveform", "material", "dispersive", "box"}
mpi_layout = parameter([[3, 3, 3]])
serial_dependency = TestDispersiveMaterials
model = serial_dependency.model
test_dependency = TestDispersiveMaterials

View file

@@ -1,7 +1,8 @@
import reframe as rfm
from reframe.core.builtins import parameter
from reframe_tests.tests.base_tests import GprMaxMPIRegressionTest, GprMaxRegressionTest
from reframe_tests.tests.mixins import MpiMixin
from reframe_tests.tests.standard_tests import GprMaxRegressionTest
"""Reframe regression tests for models defining geometry
"""
@@ -12,7 +13,6 @@ class TestSingleCellPml(GprMaxRegressionTest):
tags = {"test", "serial", "geometery", "box", "pml"}
sourcesdir = "src/pml_tests"
model = parameter(["single_cell_pml_2d"])
rx_outputs = ["Hx"]
"""Test MPI Functionality
@@ -20,9 +20,7 @@ class TestSingleCellPml(GprMaxRegressionTest):
@rfm.simple_test
class TestSingleCellPmlMpi(GprMaxMPIRegressionTest):
class TestSingleCellPmlMpi(MpiMixin, TestSingleCellPml):
tags = {"test", "mpi", "geometery", "box", "pml"}
mpi_layout = parameter([[2, 2, 1], [3, 3, 1]])
serial_dependency = TestSingleCellPml
model = serial_dependency.model
rx_outputs = ["Hx"]
test_dependency = TestSingleCellPml

View file

@@ -1,33 +1,31 @@
import reframe as rfm
from reframe.core.builtins import parameter
from reframe_tests.tests.base_tests import GprMaxMPIRegressionTest, GprMaxRegressionTest
from reframe_tests.tests.mixins import MpiMixin
from reframe_tests.tests.standard_tests import GprMaxSnapshotTest
@rfm.simple_test
class Test2DSnapshot(GprMaxRegressionTest):
class Test2DSnapshot(GprMaxSnapshotTest):
tags = {"test", "serial", "2d", "waveform", "hertzian_dipole", "snapshot"}
sourcesdir = "src/snapshot_tests"
model = parameter(["whole_domain_2d"])
has_receiver_output = False
snapshots = ["snapshot_0.h5", "snapshot_1.h5", "snapshot_2.h5", "snapshot_3.h5"]
@rfm.simple_test
class TestSnapshot(GprMaxRegressionTest):
class TestSnapshot(GprMaxSnapshotTest):
tags = {"test", "serial", "2d", "waveform", "hertzian_dipole", "snapshot"}
sourcesdir = "src/snapshot_tests"
model = parameter(["whole_domain"])
has_receiver_output = False
snapshots = ["snapshot_0.h5", "snapshot_1.h5", "snapshot_2.h5", "snapshot_3.h5"]
@rfm.simple_test
class Test2DSliceSnapshot(GprMaxRegressionTest):
class Test2DSliceSnapshot(GprMaxSnapshotTest):
tags = {"test", "serial", "2d", "waveform", "hertzian_dipole", "snapshot"}
sourcesdir = "src/snapshot_tests"
model = parameter(["2d_slices"])
has_receiver_output = False
snapshots = [
"snapshot_x_05.h5",
"snapshot_x_35.h5",
@@ -49,15 +47,14 @@ class Test2DSliceSnapshot(GprMaxRegressionTest):
@rfm.simple_test
class Test2DSnapshotMpi(GprMaxMPIRegressionTest):
class Test2DSnapshotMpi(MpiMixin, Test2DSnapshot):
tags = {"test", "mpi", "2d", "waveform", "hertzian_dipole", "snapshot"}
mpi_layout = parameter([[2, 2, 1], [3, 3, 1], [4, 4, 1]])
serial_dependency = Test2DSnapshot
model = serial_dependency.model
test_dependency = Test2DSnapshot
@rfm.simple_test
class TestSnapshotMpi(GprMaxMPIRegressionTest):
class TestSnapshotMpi(MpiMixin, TestSnapshot):
tags = {"test", "mpi", "2d", "waveform", "hertzian_dipole", "snapshot"}
mpi_layout = parameter(
[
@@ -72,12 +69,11 @@ class TestSnapshotMpi(GprMaxMPIRegressionTest):
[4, 4, 4],
]
)
serial_dependency = TestSnapshot
model = serial_dependency.model
test_dependency = TestSnapshot
@rfm.simple_test
class Test2DSliceSnapshotMpi(GprMaxMPIRegressionTest):
class Test2DSliceSnapshotMpi(MpiMixin, Test2DSliceSnapshot):
tags = {"test", "mpi", "2d", "waveform", "hertzian_dipole", "snapshot"}
mpi_layout = parameter(
[
@@ -92,5 +88,4 @@ class Test2DSliceSnapshotMpi(GprMaxMPIRegressionTest):
[4, 4, 4],
]
)
serial_dependency = Test2DSliceSnapshot
model = serial_dependency.model
test_dependency = Test2DSliceSnapshot

View file

@@ -1,7 +1,8 @@
import reframe as rfm
from reframe.core.builtins import parameter
from reframe_tests.tests.base_tests import GprMaxMPIRegressionTest, GprMaxRegressionTest
from reframe_tests.tests.mixins import MpiMixin
from reframe_tests.tests.standard_tests import GprMaxRegressionTest
"""Reframe regression tests for each gprMax source
"""
@@ -33,24 +34,21 @@ class TestTransmissionLineSource(GprMaxRegressionTest):
@rfm.simple_test
class TestHertzianDipoleSourceMpi(GprMaxMPIRegressionTest):
class TestHertzianDipoleSourceMpi(MpiMixin, TestHertzianDipoleSource):
tags = {"test", "mpi", "hertzian_dipole", "waveform"}
mpi_layout = parameter([[3, 3, 3]])
serial_dependency = TestHertzianDipoleSource
model = serial_dependency.model
test_dependency = TestHertzianDipoleSource
@rfm.simple_test
class TestMagneticDipoleSourceMpi(GprMaxMPIRegressionTest):
class TestMagneticDipoleSourceMpi(MpiMixin, TestMagneticDipoleSource):
tags = {"test", "mpi", "magnetic_dipole", "waveform"}
mpi_layout = parameter([[3, 3, 3]])
serial_dependency = TestMagneticDipoleSource
model = serial_dependency.model
test_dependency = TestMagneticDipoleSource
@rfm.simple_test
class TestTransmissionLineSourceMpi(GprMaxMPIRegressionTest):
class TestTransmissionLineSourceMpi(MpiMixin, TestTransmissionLineSource):
tags = {"test", "mpi", "transmission_line", "waveform"}
mpi_layout = parameter([[3, 3, 3]])
serial_dependency = TestTransmissionLineSource
model = serial_dependency.model
test_dependency = TestTransmissionLineSource

View file

@@ -1,14 +1,15 @@
import reframe as rfm
from reframe.core.builtins import parameter, run_after
from reframe_tests.tests.base_tests import GprMaxAPIRegressionTest
from reframe_tests.tests.mixins import AntennaModelMixin, PythonApiMixin
from reframe_tests.tests.standard_tests import GprMaxRegressionTest
"""Reframe regression tests for subgrids
"""
@rfm.simple_test
class TestSubgrids(GprMaxAPIRegressionTest):
class TestSubgrids(PythonApiMixin, GprMaxRegressionTest):
tags = {
"test",
"api",
@@ -25,7 +26,7 @@ class TestSubgrids(GprMaxAPIRegressionTest):
@rfm.simple_test
class TestSubgridsWithAntennaModel(GprMaxAPIRegressionTest):
class TestSubgridsWithAntennaModel(AntennaModelMixin, PythonApiMixin, GprMaxRegressionTest):
tags = {
"test",
"api",
@@ -39,7 +40,6 @@ class TestSubgridsWithAntennaModel(GprMaxAPIRegressionTest):
}
sourcesdir = "src/subgrid_tests"
model = parameter(["gssi_400_over_fractal_subsurface"])
is_antenna_model = True
@run_after("init")
def skip_test(self):

View file

@@ -1,6 +1,6 @@
import reframe as rfm
from reframe_tests.tests.base_tests import GprMaxTaskfarmRegressionTest
from reframe_tests.tests.mixins import TaskfarmMixin
from reframe_tests.tests.test_example_models import TestBscan
"""Reframe regression tests for taskfarm functionality
@@ -8,7 +8,7 @@ from reframe_tests.tests.test_example_models import TestBscan
@rfm.simple_test
class TestSingleNodeTaskfarm(GprMaxTaskfarmRegressionTest):
class TestSingleNodeTaskfarm(TaskfarmMixin, TestBscan):
tags = {
"test",
"mpi",
@@ -22,13 +22,11 @@ class TestSingleNodeTaskfarm(GprMaxTaskfarmRegressionTest):
}
num_tasks = 8
num_tasks_per_node = 8
serial_dependency = TestBscan
model = serial_dependency.model
num_models = serial_dependency.num_models
test_dependency = TestBscan
@rfm.simple_test
class TestMultiNodeTaskfarm(GprMaxTaskfarmRegressionTest):
class TestMultiNodeTaskfarm(TaskfarmMixin, TestBscan):
tags = {
"test",
"mpi",
@@ -42,6 +40,4 @@ class TestMultiNodeTaskfarm(GprMaxTaskfarmRegressionTest):
}
num_tasks = 32
num_tasks_per_node = 8
serial_dependency = TestBscan
model = serial_dependency.model
num_models = serial_dependency.num_models
test_dependency = TestBscan

View file

@@ -97,7 +97,7 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("input_file", help="Path to input file")
parser.add_argument("reference_file", help="Path to reference file")
parser.add_argument("-model-name", "-name", "-n", help="Name of the model", default="model")
parser.add_argument("--model-name", "--name", "-n", help="Name of the model", default="model")
args = parser.parse_args()

View file

@@ -15,7 +15,13 @@ def get_parameter_names(item):
return re.findall(r"\s%(?P<name>\S+)=\S+", item)
columns_to_keep = ["num_tasks", "num_cpus_per_task", "num_tasks_per_node", "run_time_value", "simulation_time_value"]
columns_to_keep = [
"num_tasks",
"num_cpus_per_task",
"num_tasks_per_node",
"run_time_value",
"simulation_time_value",
]
if __name__ == "__main__":
# Parse command line arguments
@@ -40,13 +46,15 @@ if __name__ == "__main__":
columns_to_keep.sort()
perflog = perflog[columns_to_keep].sort_values(columns_to_keep)
perflog["simulation_time_value"] = perflog["simulation_time_value"].apply(round, args=[2])
perflog = perflog.rename(columns={"simulation_time_value": "simulation_time", "run_time_value": "run_time"})
perflog = perflog.rename(
columns={"simulation_time_value": "simulation_time", "run_time_value": "run_time"}
)
# Save output to file
if args.output:
outputfile = args.output
else:
stem = f"{Path(args.inputfile).stem}_{datetime.today().strftime('%Y-%m-%d_%H-%M-%S')}"
outputfile = Path("benchmarks", stem).with_suffix(".csv")
outputfile = Path("benchmark_results", stem).with_suffix(".csv")
perflog.to_csv(outputfile, index=False)
print(f"Saved benchmark: '{outputfile}'")