Begin refactoring tests to use ReFrame

This commit is contained in:
nmannall
2024-01-12 17:22:36 +00:00
Parent 220f0db099
Commit 2f381442a9
35 changed files with 209 additions and 88 deletions


@@ -19,10 +19,9 @@ numpy-stl
terminaltables
tqdm
wheel
reframe-hpcs
pytest
pytest-benchmark
pytest-benchmark[histogram]
git+https://github.com/NMannall/pytest-easyMPI.git
# pytest-mpi
pytest-regressions
git+https://github.com/craig-warren/PyEVTK.git

6  tests/.gitignore (vendored)

@@ -1 +1,5 @@
tmp/
output/
stage/
reframe.log
reframe.out
reframe_perf.out

51  tests/base_tests.py (normal file)

@@ -0,0 +1,51 @@
"""ReFrame base classes for GprMax tests"""
import os
import reframe as rfm
import reframe.utility.sanity as sn
from reframe.utility import udeps
@rfm.simple_test
class CreatePyenvTest(rfm.RunOnlyRegressionTest):
valid_systems = ["archer2:compute"]
valid_prog_environs = ["PrgEnv-cray"]
modules = ["cray-python"]
# DOES NOT CURRENTLY WORK!!!
prerun_cmds = [
"python -m venv --system-site-packages --prompt gprMax .venv",
"source .venv/bin/activate",
"pip install -r requirements.txt"
]
executable = "pip install -e ."
keep_files = ["requirements.txt"]
@sanity_function
def test_requirements_installed(self):
return sn.assert_found(r'Successfully installed ', self.stdout) and sn.assert_not_found(r'ERROR', self.stdout)
class GprmaxBaseTest(rfm.RunOnlyRegressionTest):
valid_systems = ["archer2:compute"]
valid_prog_environs = ["PrgEnv-cray"]
executable = "python -m gprMax --log-level 25"
exclusive_access = True
prerun_cmds = ["source .venv/bin/activate"]
@run_after("init")
def setup_omp(self):
self.env_vars = {
"OMP_NUM_THREADS": str(self.num_cpus_per_task)
}
@run_after("init")
def inject_dependencies(self):
self.depends_on("CreatePyenvTest", udeps.fully)
@require_deps
def set_sourcedir(self, CreatePyenvTest):
self.sourcesdir = ['src', CreatePyenvTest(part="archer2:compute", environ="PrgEnv-cray").stagedir]
@sanity_function
def test_simulation_complete(self):
return sn.assert_found(r'=== Simulation completed in ', self.stdout)
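For orientation, a concrete test derived from GprmaxBaseTest only has to point at an input file and size the job; the executable, venv activation, dependency on CreatePyenvTest and sanity check are inherited. The sketch below is illustrative only and not part of this commit; the class name, input file choice and core counts are assumptions.

# Illustrative sketch (not in this commit): a minimal test built on GprmaxBaseTest.
import reframe as rfm

from base_tests import GprmaxBaseTest


@rfm.simple_test
class ExampleAscanTest(GprmaxBaseTest):
    # Input file name and job sizing are assumptions for the example.
    executable_opts = ["cylinder_Ascan_2D.in"]
    num_tasks = 1
    num_cpus_per_task = 16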


@@ -0,0 +1,105 @@
site_configuration = {
'systems': [
{
'name': 'archer2',
'descr': 'ARCHER2',
'hostnames': ['uan','ln','dvn'],
'modules_system': 'lmod',
'partitions': [
{
'name': 'login',
'descr': 'Login nodes',
'scheduler': 'local',
'launcher': 'local',
'environs': ['PrgEnv-gnu','PrgEnv-cray','PrgEnv-aocc'],
},
{
'name': 'compute',
'descr': 'Compute nodes',
'scheduler': 'slurm',
'launcher': 'srun',
'access': ['--hint=nomultithread','--distribution=block:block','--partition=standard','--qos=standard'],
'environs': ['PrgEnv-gnu','PrgEnv-cray','PrgEnv-aocc'],
'max_jobs': 16,
}
]
}
],
'environments': [
{
'name': 'PrgEnv-gnu',
'modules': ['PrgEnv-gnu'],
'cc': 'cc',
'cxx': 'CC',
'ftn': 'ftn',
'target_systems': ['archer2']
},
{
'name': 'PrgEnv-cray',
'modules': ['PrgEnv-cray'],
'cc': 'cc',
'cxx': 'CC',
'ftn': 'ftn',
'target_systems': ['archer2']
},
{
'name': 'PrgEnv-aocc',
'modules': ['PrgEnv-aocc'],
'cc': 'cc',
'cxx': 'CC',
'ftn': 'ftn',
'target_systems': ['archer2']
},
],
'logging': [
{
'level': 'debug',
'handlers': [
{
'type': 'stream',
'name': 'stdout',
'level': 'info',
'format': '%(message)s'
},
{
'type': 'file',
'name': 'reframe.out',
'level': 'info',
'format': '[%(asctime)s] %(check_info)s: %(message)s',
'append': True
},
{
'type': 'file',
'name': 'reframe.log',
'level': 'debug',
'format': '[%(asctime)s] %(levelname)s %(levelno)s: %(check_info)s: %(message)s', # noqa: E501
'append': False
}
],
'handlers_perflog': [
{
'type': 'file',
'name': 'reframe_perf.out',
'level': 'info',
'format': '[%(asctime)s] %(check_info)s: %(check_perf_var)s=%(check_perf_value)s (ref=%(check_perf_ref)s;l=%(check_perf_lower_thres)s;u=%(check_perf_upper_thres)s)) %(check_perf_unit)s',
'append': True
},
{
'type': 'filelog',
'prefix': '%(check_system)s/%(check_partition)s',
'level': 'info',
'format': (
'%(check_job_completion_time)s|reframe %(version)s|'
'%(check_info)s|jobid=%(check_jobid)s|'
'%(check_perf_var)s=%(check_perf_value)s|'
'ref=%(check_perf_ref)s '
'(l=%(check_perf_lower_thres)s, '
'u=%(check_perf_upper_thres)s)|'
'%(check_perf_unit)s'
),
'append': True
}
]
}
],
}
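The file name of this configuration is not visible in this diff. Assuming it is the file referenced by the usage note in reframe_tests.py below, it would be passed to ReFrame with -C, roughly:

cd gprMax/tests
reframe -C configuration/{CONFIG_FILE} -c reframe_tests.py -r

ReFrame then resolves valid_systems = ["archer2:compute"] against the 'archer2' system and its 'compute' partition defined here, and valid_prog_environs against the PrgEnv-* environments.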


@@ -1,8 +0,0 @@
#title: Hertzian dipole in free-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussianprime 1 1e9 myWave
#hertzian_dipole: z 0.050 0.050 0.050 myWave
#rx: 0.070 0.070 0.070

44  tests/reframe_tests.py (normal file)

@@ -0,0 +1,44 @@
"""ReFrame tests for gprMax taskfarm and basic model runs

Usage:
    cd gprMax/tests
    reframe -C configuration/{CONFIG_FILE} -c reframe_tests.py -r
"""
from pathlib import Path

import reframe as rfm
from reframe.core.builtins import parameter, run_after

from base_tests import GprmaxBaseTest


@rfm.simple_test
class BScanTest(GprmaxBaseTest):
    executable_opts = "cylinder_Bscan_2D.in -n 64 -mpi".split()
    num_tasks = 8
    num_cpus_per_task = 16


@rfm.simple_test
class BasicModelsTest(GprmaxBaseTest):
    # List of available basic test models
    model = parameter([
        "2D_ExHyHz",
        "2D_EyHxHz",
        "2D_EzHxHy",
        "cylinder_Ascan_2D",
        "hertzian_dipole_fs",
        "hertzian_dipole_hs",
        "hertzian_dipole_dispersive",
        "magnetic_dipole_fs",
    ])
    num_cpus_per_task = 16

    @run_after("init")
    def set_model(self):
        self.executable_opts = f"{self.model}.in -o {self.model}.h5".split()
        self.keep_files = [f"{self.model}.in", f"{self.model}.h5"]
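The parameter([...]) list fans out into one generated test per model name, each with its own executable_opts and keep_files. Nothing in this commit extracts timings yet; a sketch of how a run time could be reported to the handlers_perflog defined in the site configuration is shown below. This is an assumption rather than part of the commit, and the regex over the "=== Simulation completed in" line may need adjusting to gprMax's actual output format.

# Hypothetical sketch: reporting the solve time as a ReFrame performance variable.
import reframe.utility.sanity as sn
from reframe.core.builtins import performance_function

from base_tests import GprmaxBaseTest


class TimedGprmaxTest(GprmaxBaseTest):
    @performance_function("s", perf_key="run_time")
    def extract_run_time(self):
        # Assumed log format; gprMax prints "=== Simulation completed in ..." on success.
        return sn.extractsingle(
            r"=== Simulation completed in (\S+)", self.stdout, 1, float
        )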


@@ -12,7 +12,7 @@ import pytest
import gprMax
# Cube side lengths (in cells) for different domains
DOMAINS = [0.10] # [0.10, 0.15, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80]
DOMAINS = [0.10, 0.15, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80]
# Number of OpenMP threads to benchmark each domain size
OMP_THREADS = [1, 2, 4, 8, 16, 32, 64, 128]
@@ -58,7 +58,7 @@ def test_simple_benchmarks(request, benchmark, domain, omp_threads):
scenes.append(scene)
# Run benchmark once (i.e. 1 round)
# benchmark.pedantic(gprMax.run, kwargs={'scenes': scenes, 'n': len(scenes), 'geometry_only': False, 'outputfile': output_filepath, 'gpu': None})
benchmark.pedantic(gprMax.run, kwargs={'scenes': scenes, 'n': len(scenes), 'geometry_only': False, 'outputfile': output_filepath, 'gpu': None})
# Automatically choose number of rounds.
benchmark(gprMax.run, scenes=scenes, n=len(scenes), geometry_only=False, outputfile=output_filepath, gpu=None)
# benchmark(gprMax.run, scenes=scenes, n=len(scenes), geometry_only=False, outputfile=output_filepath, gpu=None)
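The hunk above toggles between the two pytest-benchmark calling styles: benchmark.pedantic(...) runs the target a fixed number of rounds (useful when a single gprMax run is already expensive), while benchmark(...) lets the plugin calibrate rounds and iterations itself. A standalone sketch of the two modes, unrelated to gprMax:

# Standalone illustration of the two pytest-benchmark modes (not gprMax code).
def test_sum_fixed_rounds(benchmark):
    data = list(range(10_000))
    # Exactly one round and one iteration, no calibration.
    benchmark.pedantic(sum, args=(data,), rounds=1, iterations=1)


def test_sum_auto_rounds(benchmark):
    data = list(range(10_000))
    # pytest-benchmark chooses rounds/iterations automatically.
    benchmark(sum, data)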


@@ -1,74 +0,0 @@
# Copyright (C) 2015-2023: The University of Edinburgh, United Kingdom
# Authors: Craig Warren, Antonis Giannopoulos, and John Hartley
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import sys
from pathlib import Path

import matplotlib.pyplot as plt
import pytest
from pytest_easyMPI import mpi_parallel

import gprMax
from gprMax.utilities.logging import logging_config

logger = logging.getLogger(__name__)
logging_config(name=__name__)

if sys.platform == "linux":
    plt.switch_backend("agg")

"""Compare field outputs

Usage:
    cd gprMax
    pytest tests/test_models.py
"""

# Specify directory containing basic models to test
BSCAN_MODELS_DIRECTORY = Path(__file__).parent / "data" / "models_bscan"

# List of available basic test models
BSCAN_MODELS = [
    "cylinder_Bscan_2D",
]

FIELD_COMPONENTS_BASE_PATH = "/rxs/rx1/"


def run_test(model_name, input_base, data_directory, analytical_func=None, gpu=None, opencl=None):
    input_filepath = input_base.with_suffix(".in")
    reference_filepath = Path(f"{input_base}_ref.h5")
    output_base = data_directory / model_name
    output_filepath = output_base.with_suffix(".h5")

    # Run model
    gprMax.run(inputfile=input_filepath, outputfile=output_filepath, gpu=gpu, opencl=opencl, n=31, mpi=True)


@pytest.mark.parametrize("model", BSCAN_MODELS)
@pytest.mark.parametrize("n", [2, 4, 8, 16, 32])
@mpi_parallel("n")
def test_bscan_models(model, datadir, n):
    base_filepath = Path(BSCAN_MODELS_DIRECTORY, model, model)
    run_test(model, base_filepath, datadir)
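The deleted pytest path above drives the same cylinder_Bscan_2D model that BScanTest now runs through the CLI. Stripped of fixtures, its core is a direct gprMax.run() call; the sketch below is illustrative (the output path is chosen for the example) and uses only arguments that appear in the deleted code.

# Illustrative reduction of the deleted test (paths and output location are examples).
from pathlib import Path

import gprMax

base = Path("tests/data/models_bscan/cylinder_Bscan_2D/cylinder_Bscan_2D")
gprMax.run(
    inputfile=base.with_suffix(".in"),
    outputfile=Path("cylinder_Bscan_2D.h5"),
    n=31,       # number of model runs, as in the deleted run_test()
    mpi=True,   # the pytest version launched this under pytest-easyMPI
)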