Mirrored from https://gitee.com/sunhf/gprMax.git
Synced 2025-08-07 15:10:13 +08:00
Update documentation for new ReFrame classes
@@ -2,7 +2,7 @@
Usage (run all tests):

    cd gprMax/reframe_tests

-    reframe -C configuraiton/{CONFIG_FILE} -c tests/ -r
+    reframe -C configuration/{CONFIG_FILE} -c tests/ -r
"""

import os
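The -C configuration/{CONFIG_FILE} option points ReFrame at a site configuration file. As a rough illustration only (the system, partition and environment names below are placeholders, not the repository's actual configuration):

    # Minimal ReFrame site configuration sketch (illustrative, not the real
    # configuration/{CONFIG_FILE} shipped with the tests).
    site_configuration = {
        "systems": [
            {
                "name": "generic",
                "descr": "Any local machine",
                "hostnames": [".*"],
                "partitions": [
                    {
                        "name": "default",
                        "scheduler": "local",
                        "launcher": "local",
                        "environs": ["builtin"],
                    }
                ],
            }
        ],
        "environments": [
            {"name": "builtin", "cc": "cc", "cxx": "c++", "ftn": "ftn"}
        ],
    }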
@@ -29,13 +29,18 @@ from reframe.utility import udeps
from reframe_tests.tests.regression_checks import RegressionCheck
from reframe_tests.utilities.deferrable import path_join

TESTS_ROOT_DIR = Path(__file__).parent
GPRMAX_ROOT_DIR = Path(__file__).parent.parent.parent.resolve()
PATH_TO_PYENV = os.path.join(".venv", "bin", "activate")


@simple_test
class CreatePyenvTest(RunOnlyRegressionTest):
    """Create a fresh virtual environment for running the tests.

    The test checks for any errors from pip installing gprMax and its
    dependencies.
    """

    valid_systems = ["generic", "archer2:login"]
    valid_prog_environs = ["builtin", "PrgEnv-gnu"]
    modules = ["cray-python"]
@@ -50,7 +55,7 @@ class CreatePyenvTest(RunOnlyRegressionTest):

    @run_after("init")
    def install_system_specific_dependencies(self):
-        """Install additional dependencies for specific systems"""
+        """Install additional dependencies for specific systems."""
        if self.current_system.name == "archer2":
            """
            Needed to prevent a pip install error.
@@ -74,9 +79,11 @@ class CreatePyenvTest(RunOnlyRegressionTest):

    @sanity_function
    def check_requirements_installed(self):
-        """
-        Check packages successfully installed from requirements.txt
-        Check gprMax installed successfully and no other errors thrown
+        """Check packages were successfully installed.
+
+        Check pip is up to date and gprMax dependencies from
+        requirements.txt were successfully installed. Check gprMax was
+        installed successfully and no other errors were thrown.
        """
        return (
            sn.assert_found(
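The assertion body is cut off by the hunk above. A rough sketch of how such a check can be composed from ReFrame's deferrable assertions (the exact patterns matched in the repository are not shown here):

    @sanity_function
    def check_requirements_installed(self):
        # Sketch only: combine several deferred assertions; the test fails
        # if any expected message is missing from the captured pip output.
        return (
            sn.assert_found(r"Successfully installed pip", self.stdout)
            and sn.assert_found(r"Successfully installed .*gprMax", self.stdout)
            and sn.assert_not_found(r"(?i)error", self.stderr)
        )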
@@ -98,6 +105,21 @@ class CreatePyenvTest(RunOnlyRegressionTest):


class GprMaxBaseTest(RunOnlyRegressionTest):
    """Base class that all GprMax tests should inherit from.

    Test functionality can be augmented by using Mixin classes.

    Attributes:
        model (parameter[str]): ReFrame parameter to specify the model
            name(s).
        sourcesdir (str): Relative path to the test's src directory.
        regression_checks (list[RegressionCheck]): List of regression
            checks to perform.
        test_dependency (type[GprMaxBaseTest] | None): Optional test
            dependency. If specified, regression checks will use
            reference files created by the test dependency.
    """

    valid_systems = ["archer2:compute"]
    valid_prog_environs = ["PrgEnv-gnu"]
    modules = ["cray-python"]
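For illustration, a concrete test built on this base class might look like the sketch below; the class name, model name and directory are hypothetical:

    # Hypothetical subclass; values are not taken from the repository.
    @simple_test
    class HypotheticalCylinderTest(GprMaxBaseTest):
        model = parameter(["cylinder_Ascan_2D"])
        sourcesdir = "src/cylinder"

        @run_after("setup")
        def add_checks(self):
            # One whole-file regression check against the per-test reference file.
            self.regression_checks = [
                RegressionCheck(
                    output_file=self.output_file,
                    reference_file=self.build_reference_filepath(self.model),
                )
            ]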
@@ -119,13 +141,30 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
    # test_dependency = variable(type(None), type, value=None)

    def get_test_dependency_variant_name(self, **kwargs) -> Optional[str]:
        """Get unique ReFrame name of the test dependency variant.

        By default, filter test dependencies by the model name.

        Args:
            **kwargs: Additional key-value pairs to filter the parameter
                space of the test dependency. The key is the test
                parameter name and the value is either a single value or
                a unary function that evaluates to True if the parameter
                point must be kept, False otherwise.

        Returns:
            variant_name: Unique name of the test dependency variant.
        """
        if self.test_dependency is None:
            return None

-        variant_nums = self.test_dependency.get_variant_nums(model=self.model, **kwargs)
+        # Always filter by the model parameter, but allow child classes
+        # (or mixins) to override how models are filtered.
+        kwargs.setdefault("model", self.model)
+
+        variant_nums = self.test_dependency.get_variant_nums(**kwargs)

        if len(variant_nums) < 1:
            kwargs.setdefault("model", self.model)
            raise DependencyError(
                f"No variant of '{self.test_dependency.__name__}' meets conditions: {kwargs}",
            )
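As an illustration of the filtering hooks described in this docstring (the parameter names and values below are hypothetical):

    # Narrow the dependency's parameter space with plain values or predicates.
    variant = self.get_test_dependency_variant_name(
        num_models=4,                       # exact match on a parameter value
        mpi_layout=lambda l: prod(l) <= 8,  # predicate over a parameter value
    )
    if variant is not None:
        self.depends_on(variant, udeps.by_env)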
@@ -133,7 +172,11 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
        return self.test_dependency.variant_name(variant_nums[0])

    def get_test_dependency(self) -> Optional["GprMaxBaseTest"]:
-        """Get test variant with the same model and number of models"""
+        """Get correct ReFrame test case from the test dependency.
+
+        Returns:
+            test_case: ReFrame test case.
+        """
        variant = self.get_test_dependency_variant_name()
        if variant is None:
            return None
@@ -141,6 +184,19 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
        return self.getdep(variant)

    def build_reference_filepath(self, name: Union[str, os.PathLike]) -> Path:
        """Build path to the specified reference file.

        Reference files are saved in directories per test case. If this
        test does not specify a test dependency, it will save and manage
        its own reference files in its own directory. Otherwise, it will
        use reference files saved by its test dependency.

        Args:
            name: Name of the file.

        Returns:
            filepath: Absolute path to the reference file.
        """
        target = self.get_test_dependency()
        if target is None:
            reference_dir = self.short_name
@@ -150,9 +206,32 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
        reference_file = Path("regression_checks", reference_dir, name).with_suffix(".h5")
        return reference_file.absolute()

    # TODO: Change CreatePyenvTest to a fixture instead of a test dependency
    @run_after("init")
    def inject_dependencies(self):
        """Specify test dependencies.

        All tests depend on the Python virtual environment building
        correctly and their own test dependency if specified.
        """
        self.depends_on("CreatePyenvTest", udeps.by_env)
        if self.test_dependency is not None:
            variant = self.get_test_dependency_variant_name()
            self.depends_on(variant, udeps.by_env)

    @require_deps
    def get_pyenv_path(self, CreatePyenvTest):
        """Add prerun command to load the built Python environment."""
        path_to_pyenv = os.path.join(CreatePyenvTest(part="login").stagedir, PATH_TO_PYENV)
        self.prerun_cmds.append(f"source {path_to_pyenv}")

    @run_after("init")
    def setup_env_vars(self):
-        """Set OMP_NUM_THREADS environment variable from num_cpus_per_task"""
+        """Set necessary environment variables.
+
+        Set OMP_NUM_THREADS environment variable from num_cpus_per_task
+        and other system specific variables.
+        """
        self.env_vars["OMP_NUM_THREADS"] = self.num_cpus_per_task

        if self.current_system.name == "archer2":
@@ -163,34 +242,19 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
            # Set the matplotlib cache to the work filesystem
            self.env_vars["MPLCONFIGDIR"] = "${HOME/home/work}/.config/matplotlib"

-    # TODO: Change CreatePyenvTest to a fixture instead of a test dependency
-    @run_after("init")
-    def inject_dependencies(self):
-        """Test depends on the Python virtual environment building correctly"""
-        self.depends_on("CreatePyenvTest", udeps.by_env)
-        if self.test_dependency is not None:
-            variant = self.get_test_dependency_variant_name()
-            self.depends_on(variant, udeps.by_env)
-            # self.depends_on(self.test_dependency, udeps.by_env)
-
-    @require_deps
-    def get_pyenv_path(self, CreatePyenvTest):
-        """Add prerun command to load the built Python environment"""
-        path_to_pyenv = os.path.join(CreatePyenvTest(part="login").stagedir, PATH_TO_PYENV)
-        self.prerun_cmds.append(f"source {path_to_pyenv}")

    @run_after("init")
    def set_file_paths(self):
        """Set default test input and output files.

        These are set in a post-init hook to allow mixins to use them
        later in the pipeline.
        """
        self.input_file = Path(f"{self.model}.in")
        self.output_file = Path(f"{self.model}.h5")

    @run_before("run")
    def configure_test_run(self):
-        """Configure gprMax commandline arguments and plot outputs
-
-        Set the input and output files and add postrun commands to plot
-        the outputs.
-        """
+        """Configure gprMax commandline arguments and files to keep."""
        input_file = str(self.input_file)
        output_file = str(self.output_file)
@@ -202,7 +266,9 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
            "10",
            "--hide-progress-bars",
        ]
-        self.keep_files += [input_file, output_file]
+
+        regression_output_files = [r.output_file for r in self.regression_checks]
+        self.keep_files += [input_file, output_file, *regression_output_files]

        """
        if self.has_receiver_output:
@@ -226,7 +292,7 @@ class GprMaxBaseTest(RunOnlyRegressionTest):

    @run_before("run")
    def combine_task_outputs(self):
-        """Split output from each MPI rank
+        """Split output from each MPI rank.

        If running with multiple MPI ranks, split the output into
        separate files and add postrun commands to combine the files
@@ -245,23 +311,17 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
            self.postrun_cmds.append(f"cat out/{stdout}_*.out >> {self.stdout}")
            self.postrun_cmds.append(f"cat err/{stderr}_*.err >> {self.stderr}")

    # @run_before("run")
    # def check_input_file_exists(self):
    #     """Skip test if input file does not exist"""
    #     # Current working directory will be where the reframe job was launched
    #     # However reframe assumes the source directory is relative to the test file
    #     with osext.change_dir(TESTS_ROOT_DIR):
    #         self.skip_if(
    #             not os.path.exists(self.sourcesdir),
    #             f"Source directory '{self.sourcesdir}' does not exist. Current working directory: '{os.getcwd()}'",
    #         )
    #         self.skip_if(
    #             not os.path.exists(os.path.join(self.sourcesdir, self.input_file)),
    #             f"Input file '{self.input_file}' not present in source directory '{self.sourcesdir}'",
    #         )
    def test_simulation_complete(self) -> Literal[True]:
-        """Check simulation completed successfully"""
+        """Check simulation completed successfully.
+
+        Returns:
+            simulation_completed: Returns True if the simulation
+                completed, otherwise it fails the test.
+
+        Raises:
+            reframe.core.exceptions.SanityError: If the simulation did
+                not complete.
+        """
        return sn.assert_not_found(
            r"(?i)error",
            self.stderr,
@@ -270,13 +330,20 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
            r"=== Simulation completed in ", self.stdout, "Simulation did not complete"
        )

-    @sanity_function
-    def regression_check(self) -> bool:
-        """Perform regression check for the test output and snapshots
    def test_reference_files_exist(self) -> Literal[True]:
        """Check all reference files exist and create any missing ones.

        If not all the reference files exist, then create all the
        missing reference files from the test output and fail the test.

        Returns:
            files_exist: Returns True if all reference files exist,
                otherwise it fails the test.

        Raises:
            reframe.core.exceptions.SanityError: If any reference files
                do not exist.
        """

        # Store error messages so all reference files can be checked
        # (and created if necessary) before the test is failed.
        error_messages = []
        for check in self.regression_checks:
            if not check.reference_file_exists():
@@ -292,23 +359,44 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
                error_messages.append(
                    f"ERROR: Unable to create reference file: '{check.reference_file}'"
                )
        return sn.assert_true(len(error_messages) < 1, "\n".join(error_messages))

    @sanity_function
    def regression_check(self) -> bool:
        """Run sanity checks and regression checks.

        Checks will run in the following order:
        - Check the simulation completed.
        - Check all reference files exist.
        - Run all regression checks.

        If any of these checks fail, the test will fail and none of the
        other later checks will run.

        Returns:
            test_passed: Returns True if all checks pass.

        Raises:
            reframe.core.exceptions.SanityError: If any regression
                checks fail.
        """

        return (
            self.test_simulation_complete()
-            and sn.assert_true(len(error_messages) < 1, "\n".join(error_messages))
+            and self.test_reference_files_exist()
            and sn.all(sn.map(lambda check: check.run(), self.regression_checks))
        )
@performance_function("s", perf_key="run_time")
|
||||
def extract_run_time(self):
|
||||
"""Extract total runtime from the last task to complete"""
|
||||
"""Extract total runtime from the last task to complete."""
|
||||
return sn.extractsingle(
|
||||
r"real\s+(?P<run_time>\S+)", self.stderr, "run_time", float, self.num_tasks - 1
|
||||
)
|
||||
|
||||
@performance_function("s", perf_key="simulation_time")
|
||||
def extract_simulation_time(self):
|
||||
"""Extract simulation time reported by gprMax"""
|
||||
"""Extract simulation time reported by gprMax."""
|
||||
|
||||
# sn.extractall throws an error if a group has value None.
|
||||
# Therefore have to handle the < 1 min, >= 1 min and >= 1 hour cases separately.
|
||||
@@ -347,116 +435,3 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
            float,
        )
        return hours * 3600 + minutes * 60 + seconds
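The time string printed by gprMax ("X hours Y minutes Z seconds", with the larger units only present for long runs) is parsed case by case, as the comment above notes. A standalone sketch of that conversion, assuming this wording of the output:

    import re

    def simulation_time_to_seconds(text: str) -> float:
        """Convert e.g. '1 hour 2 minutes 3.5 seconds' to seconds."""
        # Each unit is optional, so capture them independently rather than
        # with one pattern whose groups may be None.
        hours = re.search(r"([\d.]+)\s*hour", text)
        minutes = re.search(r"([\d.]+)\s*minute", text)
        seconds = re.search(r"([\d.]+)\s*second", text)
        total = 0.0
        for match, scale in ((hours, 3600), (minutes, 60), (seconds, 1)):
            if match:
                total += float(match.group(1)) * scale
        return total

    assert simulation_time_to_seconds("2 minutes 3.5 seconds") == 123.5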


-class GprMaxAPIRegressionTest(GprMaxBaseTest):
-    executable = "time -p python"
-
-    @run_after("setup", always_last=True)
-    def configure_test_run(self):
-        """Input files for API tests will be python files"""
-        # super().configure_test_run(input_file_ext=".py")
-        pass
-
-
-class GprMaxBScanRegressionTest(GprMaxBaseTest):
-    num_models = parameter()
-
-    @run_after("setup", always_last=True)
-    def configure_test_run(self):
-        """Add B-Scan specific commandline arguments and postrun cmds"""
-        self.extra_executable_opts += ["-n", str(self.num_models)]
-        super().configure_test_run()
-
-        # Override postrun_cmds
-        # Merge output files and create B-Scan plot
-        self.postrun_cmds = [
-            f"python -m toolboxes.Utilities.outputfiles_merge {self.model}",
-            f"mv {self.model}_merged.h5 {self.output_file}",
-            f"python -m toolboxes.Plotting.plot_Bscan -save {self.output_file} Ez",
-        ]
-
-
-class GprMaxTaskfarmRegressionTest(GprMaxBScanRegressionTest):
-    serial_dependency: type[GprMaxBaseTest]
-    extra_executable_opts = ["-taskfarm"]
-    sourcesdir = "src"  # Necessary so test is not skipped (set later)
-
-    num_tasks = required
-
-    def _get_variant(self) -> str:
-        """Get test variant with the same model and number of models"""
-        variant = self.serial_dependency.get_variant_nums(
-            model=lambda m: m == self.model, num_models=lambda n: n == self.num_models
-        )
-        return self.serial_dependency.variant_name(variant[0])
-
-    @run_after("init")
-    def inject_dependencies(self):
-        """Test depends on the serial version of the test"""
-        self.depends_on(self._get_variant(), udeps.by_env)
-        super().inject_dependencies()
-
-    @run_after("init")
-    def set_variables_from_serial_dependency(self):
-        """Set test dependencies to the same as the serial test"""
-        self.sourcesdir = str(self.serial_dependency.sourcesdir)
-        self.has_receiver_output = bool(self.serial_dependency.has_receiver_output)
-        self.snapshots = list(self.serial_dependency.snapshots)
-
-    @run_after("setup")
-    def setup_reference_files(self):
-        """
-        Set the reference file regression checks to the output of the
-        serial test
-        """
-        target = self.getdep(self._get_variant())
-        self.reference_file = os.path.join(target.stagedir, target.output_file)
-        self.snapshot_reference_files = target.snapshot_reference_files
-
-
-class GprMaxMPIRegressionTest(GprMaxBaseTest):
-    # TODO: Make this a variable
-    serial_dependency: type[GprMaxBaseTest]
-    mpi_layout = parameter()
-    sourcesdir = "src"  # Necessary so test is not skipped (set later)
-
-    @run_after("setup", always_last=True)
-    def configure_test_run(self):
-        """Add MPI specific commandline arguments"""
-        self.num_tasks = int(prod(self.mpi_layout))
-        self.extra_executable_opts = ["--mpi", *map(str, self.mpi_layout)]
-        super().configure_test_run()
-
-    def _get_variant(self) -> str:
-        """Get test variant with the same model"""
-        # TODO: Refactor tests to work with benchmarks
-        variant = self.serial_dependency.get_variant_nums(
-            model=lambda m: m == self.model,
-            # cpu_freq=lambda f: f == self.cpu_freq,
-            # omp_threads=lambda o: o == 16,
-        )
-        return self.serial_dependency.variant_name(variant[0])
-
-    @run_after("init")
-    def inject_dependencies(self):
-        """Test depends on the specified serial test"""
-        self.depends_on(self._get_variant(), udeps.by_env)
-        super().inject_dependencies()
-
-    @run_after("init")
-    def set_variables_from_serial_dependency(self):
-        """Set test dependencies to the same as the serial test"""
-        self.sourcesdir = str(self.serial_dependency.sourcesdir)
-        self.has_receiver_output = bool(self.serial_dependency.has_receiver_output)
-        self.snapshots = list(self.serial_dependency.snapshots)
-
-    @run_after("setup")
-    def setup_reference_files(self):
-        """
-        Set the reference file regression checks to the output of the
-        serial test
-        """
-        target = self.getdep(self._get_variant())
-        self.reference_file = os.path.join(target.stagedir, target.output_file)
-        self.snapshot_reference_files = target.snapshot_reference_files
@@ -14,6 +14,9 @@ from reframe_tests.tests.regression_checks import (
    SnapshotRegressionCheck,
)

# If using a static type checker, inherit from GprMaxBaseTest as the
# Mixin classes should always have access to resources from that class.
# However, during execution inherit from RegressionMixin.
if TYPE_CHECKING:
    GprMaxMixin = GprMaxBaseTest
else:
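The body of the else branch lies outside this hunk. A sketch of the complete pattern, assuming the runtime base is ReFrame's RegressionMixin as the comment states (the exact import path is not shown here):

    # Sketch only; RegressionMixin is assumed to be imported from ReFrame.
    if TYPE_CHECKING:
        # Static type checkers see the full GprMaxBaseTest interface on
        # every mixin.
        GprMaxMixin = GprMaxBaseTest
    else:
        # At runtime the mixin base stays lightweight; concrete tests
        # combine it with GprMaxBaseTest themselves.
        class GprMaxMixin(RegressionMixin):
            pass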
@@ -39,13 +42,29 @@ class ReceiverMixin(GprMaxMixin):


class SnapshotMixin(GprMaxMixin):
    """Add regression tests for snapshots.

    Attributes:
        snapshots (list[str]): List of snapshots to run regression
            checks on.
    """

    snapshots = variable(typ.List[str], value=[])

    def build_snapshot_filepath(self, snapshot: str) -> Path:
        """Build filepath to the specified snapshot.

        Args:
            snapshot: Name of the snapshot.
        """
        return Path(f"{self.model}_snaps", snapshot).with_suffix(".h5")

    @run_after("setup")
    def add_snapshot_regression_checks(self):
        """Add a regression check for each snapshot.

        The test will be skipped if no snapshots have been specified.
        """
        self.skip_if(
            len(self.snapshots) < 1,
            f"Must provide a list of snapshots.",
@@ -59,29 +78,47 @@ class SnapshotMixin(GprMaxMixin):


class PythonApiMixin(GprMaxMixin):
    """Use the GprMax Python API rather than a standard input file."""

    @run_after("setup")
-    def set_python_input_file(self):
-        """Input files for API tests will be python files"""
+    def use_python_input_file(self):
+        """Input files for API tests will be python files."""
        self.executable = "time -p python"
        self.input_file = self.input_file.with_suffix(".py")

class MpiMixin(GprMaxMixin):
    """Run test using GprMax MPI functionality.

    Attributes:
        mpi_layout (parameter[list[int]]): ReFrame parameter to specify
            how MPI tasks should be arranged.
    """

    mpi_layout = parameter()

    @run_after("setup")
    def configure_mpi_tasks(self):
-        """Add MPI specific commandline arguments"""
+        """Set num_tasks and add MPI specific commandline arguments."""
        self.num_tasks = int(prod(self.mpi_layout))
        self.executable_opts += ["--mpi", *map(str, self.mpi_layout)]
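A concrete test would supply the layouts; for example (class name, model and layouts are illustrative only):

    # Hypothetical concrete test using the MPI mixin.
    @simple_test
    class HypotheticalMpiTest(MpiMixin, GprMaxBaseTest):
        model = parameter(["cylinder_Ascan_2D"])
        # Each layout is the number of MPI domains in x, y and z; num_tasks
        # becomes prod(layout), e.g. 2 * 2 * 1 = 4 ranks for the first variant.
        mpi_layout = parameter([[2, 2, 1], [4, 2, 1]])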


class BScanMixin(GprMaxMixin):
    """Test a B-scan model - a model with a moving source and receiver.

    Attributes:
        num_models (parameter[int]): Number of models to run.
    """

    num_models = parameter()

    @run_after("setup")
    def setup_bscan_test(self):
-        """Add B-Scan specific commandline arguments and postrun cmds"""
+        """Add B-scan specific commandline arguments and postrun cmds.
+
+        Set the number of models to run, and merge the output files.
+        """
        self.executable_opts += ["-n", str(self.num_models)]

        self.postrun_cmds += [
@@ -89,18 +126,41 @@ class BScanMixin(GprMaxMixin):
            f"mv {self.model}_merged.h5 {self.output_file}",
        ]
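The first post-run command is cut off by the hunk header above; based on the older GprMaxBScanRegressionTest removed earlier in this diff, the full sequence is presumably along these lines:

    # Likely full post-run sequence, mirroring the removed
    # GprMaxBScanRegressionTest: merge the per-model outputs, rename the
    # merged file, then plot the B-scan.
    self.postrun_cmds += [
        f"python -m toolboxes.Utilities.outputfiles_merge {self.model}",
        f"mv {self.model}_merged.h5 {self.output_file}",
        f"python -m toolboxes.Plotting.plot_Bscan -save {self.output_file} Ez",
    ]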

-    def get_test_dependency_variant_name(self) -> Optional[str]:
-        return super().get_test_dependency_variant_name(num_models=self.num_models)
+    def get_test_dependency_variant_name(self, **kwargs) -> Optional[str]:
+        """Get unique ReFrame name of the test dependency variant.
+
+        By default, filter test dependencies by the model name and the
+        number of models.
+
+        Args:
+            **kwargs: Additional key-value pairs to filter the parameter
+                space of the test dependency. The key is the test
+                parameter name and the value is either a single value or
+                a unary function that evaluates to True if the parameter
+                point must be kept, False otherwise.
+
+        Returns:
+            variant_name: Unique name of the test dependency variant.
+        """
+        kwargs.setdefault("num_models", self.num_models)
+        return super().get_test_dependency_variant_name(**kwargs)


class TaskfarmMixin(GprMaxMixin):
    """Run test using GprMax taskfarm functionality."""

    # TODO: Make this a required variable, or create a new variable to
    # proxy it.
    # num_tasks = required

    @run_after("setup")
    def add_taskfarm_flag(self):
-        """Add taskfarm specific commandline arguments"""
+        """Add taskfarm specific commandline arguments."""
        self.executable_opts += ["--taskfarm"]


class AntennaModelMixin(GprMaxMixin):
    """Test an antenna model."""

    pass
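Taken together, the mixins are designed to be composed with GprMaxBaseTest by concrete tests. An illustrative composition (class, model and snapshot names are hypothetical):

    # Hypothetical composed test: a B-scan model run through the MPI code
    # path with snapshot regression checks.
    @simple_test
    class HypotheticalBScanMpiTest(BScanMixin, MpiMixin, SnapshotMixin, GprMaxBaseTest):
        model = parameter(["cylinder_Bscan_2D"])
        num_models = parameter([4])
        mpi_layout = parameter([[2, 2, 1]])
        snapshots = ["snapshot_10", "snapshot_20"]
        sourcesdir = "src/cylinder"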
@@ -9,20 +9,37 @@ from reframe.utility import osext


class RegressionCheck:
-    """Compare two .h5 files using h5diff"""
+    """Compare two hdf5 files using h5diff"""

    def __init__(
        self, output_file: Union[str, PathLike], reference_file: Union[str, PathLike]
    ) -> None:
        """Create a new regression check.

        Args:
            output_file: Path to output file generated by the test.
            reference_file: Path to reference file to run the regression
                check against.
        """
        self.output_file = Path(output_file)
        self.reference_file = Path(reference_file)
        self.h5diff_options: list[str] = []

    @property
    def error_msg(self) -> str:
        """Message to display if the regression check fails"""
        return "Failed regression check"

    def create_reference_file(self) -> bool:
        """Create reference file if it does not already exist.

        The reference file is created as a copy of the current output
        file.

        Returns:
            file_created: Returns True if a new file was created, False
                if the path already exists.
        """
        if not sn.path_exists(self.reference_file):
            self.reference_file.parent.mkdir(parents=True, exist_ok=True)
            copyfile(self.output_file, self.reference_file)
@@ -31,9 +48,26 @@ class RegressionCheck:
            return False

    def reference_file_exists(self) -> bool:
        """Check if the reference file exists.

        Returns:
            file_exists: Returns true if the reference filepath is a
                regular file, False otherwise.
        """
        return sn.path_isfile(self.reference_file)

    def run(self) -> Literal[True]:
        """Run the regression check using h5diff.

        Returns:
            check_passed: Returns True if the output file matches the
                reference file (i.e. no output from h5diff). Otherwise,
                raises a SanityError.

        Raises:
            reframe.core.exceptions.SanityError: If the output file does
                not exist, or the regression check fails.
        """
        if runtime().system.name == "archer2":
            h5diff = "/opt/cray/pe/hdf5/default/bin/h5diff"
        else:
@@ -50,7 +84,6 @@ class RegressionCheck:
            h5diff_output.stdout,
            (
                f"{self.error_msg}\n"
-                # f"For more details run: 'h5diff {' '.join(self.h5diff_options)} {self.output_file} {self.reference_file}'\n"
                f"For more details run: '{' '.join(h5diff_output.args)}'\n"
                f"To re-create regression file, delete '{self.reference_file}' and rerun the test."
            ),
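Pieced together from the fragments above, the complete run() method is presumably along the lines of the sketch below; the subprocess call and the empty-output check are assumptions, not the repository's exact code:

    import subprocess

    def run(self) -> Literal[True]:
        # Assumption: fall back to an h5diff found on PATH off ARCHER2.
        if runtime().system.name == "archer2":
            h5diff = "/opt/cray/pe/hdf5/default/bin/h5diff"
        else:
            h5diff = "h5diff"

        # Sketch uses subprocess directly; the real check appears to use
        # ReFrame's osext helper, judging by h5diff_output.args above.
        h5diff_output = subprocess.run(
            [h5diff, *self.h5diff_options, str(self.output_file), str(self.reference_file)],
            capture_output=True,
            text=True,
        )
        # Assumption: no differences means h5diff produces no output.
        return sn.assert_true(
            h5diff_output.stdout.strip() == "",
            (
                f"{self.error_msg}\n"
                f"For more details run: '{' '.join(h5diff_output.args)}'\n"
                f"To re-create regression file, delete '{self.reference_file}' and rerun the test."
            ),
        )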
@@ -58,6 +91,12 @@ class RegressionCheck:


class ReceiverRegressionCheck(RegressionCheck):
    """Run regression check on individual receivers in output files.

    This can include arbitrary receivers in each file, or two receivers
    in the same file.
    """

    def __init__(
        self,
        output_file: Union[str, PathLike],
@@ -65,6 +104,17 @@ class ReceiverRegressionCheck(RegressionCheck):
        output_receiver: Optional[str],
        reference_receiver: Optional[str] = None,
    ) -> None:
        """Create a new receiver regression check.

        Args:
            output_file: Path to output file generated by the test.
            reference_file: Path to reference file to run the regression
                check against.
            output_receiver: Output receiver to check.
            reference_receiver: Optional receiver to check against in
                the reference file. If None, this will be the same as
                the output receiver.
        """
        super().__init__(output_file, reference_file)

        self.output_receiver = output_receiver
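A hypothetical construction of such a check; gprMax output files store receivers under /rxs/rx<N>, and the file and receiver names below are placeholders. The __init__ shown only stores the receiver names; presumably the subclass's run logic (not shown in this diff) narrows the h5diff comparison to those groups.

    # Hypothetical usage of the receiver-level check.
    check = ReceiverRegressionCheck(
        output_file="model_output.h5",
        reference_file="regression_checks/model_output.h5",
        output_receiver="rx1",
        reference_receiver="rx1",
    )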

@@ -80,6 +130,8 @@ class ReceiverRegressionCheck(RegressionCheck):


class SnapshotRegressionCheck(RegressionCheck):
    """Run regression check on a gprMax Snapshot."""

    @property
    def error_msg(self) -> str:
        return f"Snapshot '{self.output_file.name}' failed regression check "