Mirrored from https://gitee.com/sunhf/gprMax.git (synced 2025-08-06 04:26:52 +08:00)

Merge branch 'optimisation-taguchi'
@@ -39,6 +39,7 @@ class GeometryView:
            filename (str): Filename to save to.
            type (str): Either 'n' for a per cell geometry view, or 'f' for a per cell edge geometry view.
        """

        self.xs = xs
        self.ys = ys
        self.zs = zs
gprMax/gprMax.py (336 changed lines)
@@ -22,16 +22,17 @@
__version__ = '3.0.0b14'
versionname = ' (Bowmore)'

-import sys, os, datetime, itertools, argparse
+import sys, os, datetime, itertools, argparse, importlib
if sys.platform != 'win32':
    import resource
from time import perf_counter
from copy import deepcopy
from enum import Enum
+from collections import OrderedDict

import numpy as np

-from gprMax.constants import e0
+from gprMax.constants import c, e0, m0, z0, floattype
from gprMax.exceptions import CmdInputError
from gprMax.fields_update import *
from gprMax.grid import FDTDGrid
@@ -56,85 +57,282 @@ def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(prog='gprMax', description='Electromagnetic modelling software based on the Finite-Difference Time-Domain (FDTD) method')
    parser.add_argument('inputfile', help='path to and name of inputfile')
-    parser.add_argument('--geometry-only', action='store_true', default=False, help='only build model and produce geometry files')
    parser.add_argument('-n', default=1, type=int, help='number of times to run the input file')
    parser.add_argument('-mpi', action='store_true', default=False, help='switch on MPI')
-    parser.add_argument('--commands-python', action='store_true', default=False, help='write an input file after any Python code blocks in the original input file have been processed')
+    parser.add_argument('--geometry-only', action='store_true', default=False, help='only build model and produce geometry file(s)')
+    parser.add_argument('--write-python', action='store_true', default=False, help='write an input file after any Python code blocks in the original input file have been processed')
+    parser.add_argument('--opt-taguchi', action='store_true', default=False, help='optimise parameters using the Taguchi optimisation method')
    args = parser.parse_args()
    numbermodelruns = args.n
    inputdirectory = os.path.dirname(os.path.abspath(args.inputfile)) + os.sep
    inputfile = inputdirectory + os.path.basename(args.inputfile)
+    inputfileparts = os.path.splitext(inputfile)

-    print('Model input file: {}\n'.format(inputfile))
+    # Create a separate namespace that users can access in any Python code blocks in the input file
+    usernamespace = {'c': c, 'e0': e0, 'm0': m0, 'z0': z0, 'number_model_runs': numbermodelruns, 'inputdirectory': inputdirectory}

+    if args.opt_taguchi and numbermodelruns > 1:
+        raise CmdInputError('When a Taguchi optimisation is being carried out the number of model runs argument is not required')

-    # Mixed mode MPI/OpenMP - task farm for model runs with MPI; each model parallelised with OpenMP
-    if args.mpi:
-        from mpi4py import MPI
-
-        # Define MPI message tags
-        tags = Enum('tags', {'READY': 0, 'DONE': 1, 'EXIT': 2, 'START': 3})
-
-        # Initializations and preliminaries
-        comm = MPI.COMM_WORLD # get MPI communicator object
-        size = comm.size # total number of processes
-        rank = comm.rank # rank of this process
-        status = MPI.Status() # get MPI status object
-        name = MPI.Get_processor_name() # get name of processor/host
-
-        if rank == 0:
-            # Master process
-            modelrun = 1
-            numworkers = size - 1
-            closedworkers = 0
-            print('Master: PID {} on {} using {} workers.'.format(os.getpid(), name, numworkers))
-            while closedworkers < numworkers:
-                data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
-                source = status.Get_source()
-                tag = status.Get_tag()
-                if tag == tags.READY.value:
-                    # Worker is ready, so send it a task
-                    if modelrun < numbermodelruns + 1:
-                        comm.send(modelrun, dest=source, tag=tags.START.value)
-                        print('Master: sending model {} to worker {}.'.format(modelrun, source))
-                        modelrun += 1
-                    else:
-                        comm.send(None, dest=source, tag=tags.EXIT.value)
-                elif tag == tags.DONE.value:
-                    print('Worker {}: completed.'.format(source))
-                elif tag == tags.EXIT.value:
-                    print('Worker {}: exited.'.format(source))
-                    closedworkers += 1
-        else:
-            # Worker process
-            print('Worker {}: PID {} on {} requesting {} OpenMP threads.'.format(rank, os.getpid(), name, os.environ.get('OMP_NUM_THREADS')))
-            while True:
-                comm.send(None, dest=0, tag=tags.READY.value)
-                # Receive a model number to run from the master
-                modelrun = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
-                tag = status.Get_tag()
-
-                if tag == tags.START.value:
-                    # Run a model
-                    run_model(args, modelrun, numbermodelruns, inputfile, inputdirectory)
-                    comm.send(None, dest=0, tag=tags.DONE.value)
-                elif tag == tags.EXIT.value:
-                    break
-
-            comm.send(None, dest=0, tag=tags.EXIT.value)
-
-    # Standard behaviour - models run serially; each model parallelised with OpenMP
-    else:
-        tsimstart = perf_counter()
-        for modelrun in range(1, numbermodelruns + 1):
-            run_model(args, modelrun, numbermodelruns, inputfile, inputdirectory)
-        tsimend = perf_counter()
-        print('\nTotal simulation time [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=int(tsimend - tsimstart))))
-
-    print('\nSimulation completed.\n{}\n'.format(65*'*'))
+    ########################################
+    # Process for Taguchi optimisation #
+    ########################################
+    if args.opt_taguchi:
+        from user_libs.optimisations.taguchi import taguchi_code_blocks, select_OA, calculate_ranges_experiments, calculate_optimal_levels, plot_optimisation_history
+
+        # Default maximum number of iterations of optimisation to perform (used if the stopping criterion is not achieved)
+        maxiterations = 20
+
+        # Process Taguchi code blocks in the input file; pass in ordered dictionary to hold parameters to optimise
+        tmp = usernamespace.copy()
+        tmp.update({'optparams': OrderedDict()})
+        taguchinamespace = taguchi_code_blocks(inputfile, tmp)
+
+        # Extract dictionaries and variables containing initialisation parameters
+        optparams = taguchinamespace['optparams']
+        fitness = taguchinamespace['fitness']
+        if 'maxiterations' in taguchinamespace:
+            maxiterations = taguchinamespace['maxiterations']
+
+        # Store initial parameter ranges
+        optparamsinit = list(optparams.items())
+
+        # Dictionary to hold history of optmised values of parameters
+        optparamshist = OrderedDict((key, list()) for key in optparams)
+
+        # Import specified fitness function
+        fitness_metric = getattr(importlib.import_module('user_libs.optimisations.taguchi_fitness'), fitness['name'])
+
+        # Select OA
+        OA, N, k, s = select_OA(optparams)
+
+        # Initialise arrays and lists to store parameters required throughout optimisation
+        # Lower, central, and upper values for each parameter
+        levels = np.zeros((s, k), dtype=floattype)
+        # Optimal lower, central, or upper value for each parameter
+        levelsopt = np.zeros(k, dtype=floattype)
+        # Difference used to set values for levels
+        levelsdiff = np.zeros(k, dtype=floattype)
+        # History of fitness values from each confirmation experiment
+        fitnessvalueshist = []
+
+        i = 0
+        while i < maxiterations:
+            # Set number of model runs to number of experiments
+            numbermodelruns = N
+            usernamespace['number_model_runs'] = numbermodelruns
+
+            # Fitness values for each experiment
+            fitnessvalues = []
+
+            # Set parameter ranges and define experiments
+            optparams, levels, levelsdiff = calculate_ranges_experiments(optparams, optparamsinit, levels, levelsopt, levelsdiff, OA, N, k, s, i)
+
+            # Mixed mode MPI/OpenMP - task farm for model runs with MPI; each model parallelised with OpenMP
+            if args.mpi:
+                from mpi4py import MPI
+
+                # Define MPI message tags
+                tags = Enum('tags', {'READY': 0, 'DONE': 1, 'EXIT': 2, 'START': 3})
+
+                # Initializations and preliminaries
+                comm = MPI.COMM_WORLD # get MPI communicator object
+                size = comm.size # total number of processes
+                rank = comm.rank # rank of this process
+                status = MPI.Status() # get MPI status object
+                name = MPI.Get_processor_name() # get name of processor/host
+
+                if rank == 0:
+                    # Master process
+                    modelrun = 1
+                    numworkers = size - 1
+                    closedworkers = 0
+                    print('Master: PID {} on {} using {} workers.'.format(os.getpid(), name, numworkers))
+                    while closedworkers < numworkers:
+                        data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
+                        source = status.Get_source()
+                        tag = status.Get_tag()
+                        if tag == tags.READY.value:
+                            # Worker is ready, so send it a task
+                            if modelrun < numbermodelruns + 1:
+                                comm.send(modelrun, dest=source, tag=tags.START.value)
+                                print('Master: sending model {} to worker {}.'.format(modelrun, source))
+                                modelrun += 1
+                            else:
+                                comm.send(None, dest=source, tag=tags.EXIT.value)
+                        elif tag == tags.DONE.value:
+                            print('Worker {}: completed.'.format(source))
+                        elif tag == tags.EXIT.value:
+                            print('Worker {}: exited.'.format(source))
+                            closedworkers += 1
+                else:
+                    # Worker process
+                    print('Worker {}: PID {} on {} requesting {} OpenMP threads.'.format(rank, os.getpid(), name, os.environ.get('OMP_NUM_THREADS')))
+                    while True:
+                        comm.send(None, dest=0, tag=tags.READY.value)
+                        # Receive a model number to run from the master
+                        modelrun = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
+                        tag = status.Get_tag()
+
+                        if tag == tags.START.value:
+                            # Run a model
+                            # Add specific value for each parameter to optimise for each experiment to user accessible namespace
+                            optnamespace = usernamespace.copy()
+                            tmp = {}
+                            tmp.update((key, value[modelrun - 1]) for key, value in optparams.items())
+                            optnamespace.update({'optparams': tmp})
+                            run_model(args, modelrun, numbermodelruns, inputfile, usernamespace)
+                            comm.send(None, dest=0, tag=tags.DONE.value)
+                        elif tag == tags.EXIT.value:
+                            break
+
+                    comm.send(None, dest=0, tag=tags.EXIT.value)
+
+            # Standard behaviour - models run serially; each model parallelised with OpenMP
+            else:
+                tsimstart = perf_counter()
+                for modelrun in range(1, numbermodelruns + 1):
+                    # Add specific value for each parameter to optimise, for each experiment to user accessible namespace
+                    optnamespace = usernamespace.copy()
+                    tmp = {}
+                    tmp.update((key, value[modelrun - 1]) for key, value in optparams.items())
+                    optnamespace.update({'optparams': tmp})
+                    run_model(args, modelrun, numbermodelruns, inputfile, optnamespace)
+                tsimend = perf_counter()
+                print('\nTotal simulation time [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=int(tsimend - tsimstart))))
+
+            # Calculate fitness value for each experiment
+            for exp in range(1, numbermodelruns + 1):
+                outputfile = inputfileparts[0] + str(exp) + '.out'
+                fitnessvalues.append(fitness_metric(outputfile, fitness['args']))
+                os.remove(outputfile)
+
+            print('\nTaguchi optimisation, iteration {}: completed initial {} experiments completed with fitness values {}.'.format(i + 1, numbermodelruns, fitnessvalues))
+
+            # Calculate optimal levels from fitness values by building a response table; update dictionary of parameters with optimal values
+            optparams, levelsopt = calculate_optimal_levels(optparams, levels, levelsopt, fitnessvalues, OA, N, k)
+
+            # Run a confirmation experiment with optimal values
+            numbermodelruns = 1
+            usernamespace['number_model_runs'] = numbermodelruns
+            tsimstart = perf_counter()
+            for modelrun in range(1, numbermodelruns + 1):
+                # Add specific value for each parameter to optimise, for each experiment to user accessible namespace
+                optnamespace = usernamespace.copy()
+                tmp = {}
+                for key, value in optparams.items():
+                    tmp[key] = value[modelrun - 1]
+                    optparamshist[key].append(value[modelrun - 1])
+                optnamespace.update({'optparams': tmp})
+                run_model(args, modelrun, numbermodelruns, inputfile, optnamespace)
+            tsimend = perf_counter()
+            print('\nTotal simulation time [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=int(tsimend - tsimstart))))
+
+            # Calculate fitness value for confirmation experiment
+            outputfile = inputfileparts[0] + '.out'
+            fitnessvalueshist.append(fitness_metric(outputfile, fitness['args']))
+
+            # Rename confirmation experiment output file so that it is retained for each iteraction
+            os.rename(outputfile, os.path.splitext(outputfile)[0] + '_final' + str(i + 1) + '.out')
+
+            print('\nTaguchi optimisation, iteration {} completed. History of optimal parameter values {} and of fitness values {}'.format(i + 1, dict(optparamshist), fitnessvalueshist, 68*'*'))
+
+            i += 1
+
+            # Stop optimisation if stopping criterion has been reached
+            if fitnessvalueshist[i - 1] > fitness['stop']:
+                break
+
+            # Stop optimisation if successive fitness values are within 1%
+            if i > 2:
+                fitnessvaluesclose = (np.abs(fitnessvalueshist[i - 2] - fitnessvalueshist[i - 1]) / fitnessvalueshist[i - 1]) * 100
+                if fitnessvaluesclose < 1:
+                    break
+
+        # Save optimisation parameters history and fitness values history to file
+        opthistfile = inputfileparts[0] + '_hist'
+        np.savez(opthistfile, dict(optparamshist), fitnessvalueshist)
+
+        print('\n{}\nTaguchi optimisation completed after {} iteration(s).\nHistory of optimal parameter values {} and of fitness values {}\n{}\n'.format(68*'*', i, dict(optparamshist), fitnessvalueshist, 68*'*'))
+
+        # Plot the history of fitness values and each optimised parameter values for the optimisation
+        plot_optimisation_history(fitnessvalueshist, optparamshist, optparamsinit)
+
+    #######################################
+    # Process for standard simulation #
+    #######################################
+    else:
+        if args.mpi and numbermodelruns == 1:
+            raise CmdInputError('MPI is not beneficial when there is only one model to run')
+
+        # Mixed mode MPI/OpenMP - task farm for model runs with MPI; each model parallelised with OpenMP
+        if args.mpi:
+            from mpi4py import MPI
+
+            # Define MPI message tags
+            tags = Enum('tags', {'READY': 0, 'DONE': 1, 'EXIT': 2, 'START': 3})
+
+            # Initializations and preliminaries
+            comm = MPI.COMM_WORLD # get MPI communicator object
+            size = comm.size # total number of processes
+            rank = comm.rank # rank of this process
+            status = MPI.Status() # get MPI status object
+            name = MPI.Get_processor_name() # get name of processor/host
+
+            if rank == 0:
+                # Master process
+                modelrun = 1
+                numworkers = size - 1
+                closedworkers = 0
+                print('Master: PID {} on {} using {} workers.'.format(os.getpid(), name, numworkers))
+                while closedworkers < numworkers:
+                    data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
+                    source = status.Get_source()
+                    tag = status.Get_tag()
+                    if tag == tags.READY.value:
+                        # Worker is ready, so send it a task
+                        if modelrun < numbermodelruns + 1:
+                            comm.send(modelrun, dest=source, tag=tags.START.value)
+                            print('Master: sending model {} to worker {}.'.format(modelrun, source))
+                            modelrun += 1
+                        else:
+                            comm.send(None, dest=source, tag=tags.EXIT.value)
+                    elif tag == tags.DONE.value:
+                        print('Worker {}: completed.'.format(source))
+                    elif tag == tags.EXIT.value:
+                        print('Worker {}: exited.'.format(source))
+                        closedworkers += 1
+            else:
+                # Worker process
+                print('Worker {}: PID {} on {} requesting {} OpenMP threads.'.format(rank, os.getpid(), name, os.environ.get('OMP_NUM_THREADS')))
+                while True:
+                    comm.send(None, dest=0, tag=tags.READY.value)
+                    # Receive a model number to run from the master
+                    modelrun = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
+                    tag = status.Get_tag()

+                    if tag == tags.START.value:
+                        # Run a model
+                        run_model(args, modelrun, numbermodelruns, inputfile, usernamespace)
+                        comm.send(None, dest=0, tag=tags.DONE.value)
+                    elif tag == tags.EXIT.value:
+                        break
+
+                comm.send(None, dest=0, tag=tags.EXIT.value)
+
+        # Standard behaviour - models run serially; each model parallelised with OpenMP
+        else:
+            tsimstart = perf_counter()
+            for modelrun in range(1, numbermodelruns + 1):
+                run_model(args, modelrun, numbermodelruns, inputfile, usernamespace)
+            tsimend = perf_counter()
+            print('\nTotal simulation time [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=int(tsimend - tsimstart))))
+
+        print('\nSimulation completed.\n{}\n'.format(68*'*'))


-def run_model(args, modelrun, numbermodelruns, inputfile, inputdirectory):
+def run_model(args, modelrun, numbermodelruns, inputfile, usernamespace):
    """Runs a model - processes the input file; builds the Yee cells; calculates update coefficients; runs main FDTD loop.

    Args:
@@ -142,14 +340,20 @@ def run_model(args, modelrun, numbermodelruns, inputfile, inputdirectory):
        modelrun (int): Current model run number.
        numbermodelruns (int): Total number of model runs.
        inputfile (str): Name of the input file to open.
-        inputdirectory (str): Path to the directory containing the inputfile.
+        usernamespace (dict): Namespace that can be accessed by user in any Python code blocks in input file.
    """

+    print('\n{}\n\nModel input file: {}\n'.format(68*'*', inputfile))
+
+    # Add the current model run to namespace that can be accessed by user in any Python code blocks in input file
+    usernamespace['current_model_run'] = modelrun
+    print('Constants/variables available for Python scripting: {}\n'.format(usernamespace))

    # Process any user input Python commands
-    processedlines = python_code_blocks(inputfile, modelrun, numbermodelruns, inputdirectory)
+    processedlines = python_code_blocks(inputfile, usernamespace)

    # Write a file containing the input commands after Python blocks have been processed
-    if args.commands_python:
+    if args.write_python:
        write_python_processed(inputfile, modelrun, numbermodelruns, processedlines)

    # Check validity of command names & that essential commands are present
@@ -157,7 +361,7 @@ def run_model(args, modelrun, numbermodelruns, inputfile, inputdirectory):

    # Initialise an instance of the FDTDGrid class
    G = FDTDGrid()
-    G.inputdirectory = inputdirectory
+    G.inputdirectory = usernamespace['inputdirectory']

    # Process parameters for commands that can only occur once in the model
    process_singlecmds(singlecmds, multicmds, G)
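The per-experiment bookkeeping in the new main() above is compact, so here is a minimal standalone sketch (toy parameter ranges and values, not taken from the commit) of how one value per parameter is handed to each model run through the 'optparams' entry of the namespace:

from collections import OrderedDict

# Toy values standing in for what calculate_ranges_experiments would produce:
# one value per parameter for each of three experiments
optparams = OrderedDict([('excitationfreq', [0.9e9, 1.5e9, 2.1e9]),
                         ('sourceresistance', [50, 500, 5000])])
usernamespace = {'number_model_runs': 3}

for modelrun in range(1, 4):
    optnamespace = usernamespace.copy()
    # Pick the value belonging to this experiment for every parameter
    tmp = dict((key, value[modelrun - 1]) for key, value in optparams.items())
    optnamespace.update({'optparams': tmp})
    print(modelrun, optnamespace['optparams'])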
@@ -18,19 +18,16 @@

import sys, os

-from gprMax.constants import c, e0, m0, z0
from gprMax.exceptions import CmdInputError
from gprMax.utilities import ListStream


-def python_code_blocks(inputfile, modelrun, numbermodelruns, inputdirectory):
+def python_code_blocks(inputfile, usernamespace):
    """Looks for and processes any Python code found in the input file. It will ignore any lines that are comments, i.e. begin with a double hash (##), and any blank lines. It will also ignore any lines that do not begin with a hash (#) after it has processed Python commands.

    Args:
        inputfile (str): Name of the input file to open.
-        modelrun (int): Current model run number.
-        numbermodelruns (int): Total number of model runs.
-        inputdirectory (str): Directory containing input file.
+        usernamespace (dict): Namespace that can be accessed by user in any Python code blocks in input file.

    Returns:
        processedlines (list): Input commands after Python processing.
@@ -43,11 +40,6 @@ def python_code_blocks(inputfile, modelrun, numbermodelruns, inputdirectory):
    # List to hold final processed commands
    processedlines = []

-    # Separate namespace for users Python code blocks to use; pre-populated some standard constants and the
-    # current model run number and total number of model runs
-    usernamespace = {'c': c, 'e0': e0, 'm0': m0, 'z0': z0, 'current_model_run': modelrun, 'number_model_runs': numbermodelruns, 'inputdirectory': inputdirectory}
-    print('Constants/variables available for Python scripting: {}\n'.format(usernamespace))
-
    x = 0
    while(x < len(inputlines)):
        if(inputlines[x].startswith('#python:')):
@@ -72,16 +64,13 @@ def python_code_blocks(inputfile, modelrun, numbermodelruns, inputdirectory):

            # Add processed Python code to list
            processedlines.extend(codeproc)
-            x += 1

        elif(inputlines[x].startswith('#')):
            # Add gprMax command to list
            inputlines[x] += ('\n')
            processedlines.append(inputlines[x])
-            x += 1

-        else:
-            x += 1
+        x += 1

    sys.stdout = sys.__stdout__ # Reset stdio

@@ -127,7 +116,7 @@ def check_cmd_names(processedlines):
    essentialcmds = ['#domain', '#dx_dy_dz', '#time_window']

    # Commands that there should only be one instance of in a model
-    singlecmds = dict.fromkeys(['#domain', '#dx_dy_dz', '#time_window', '#title', '#messages', '#num_threads', '#time_step_stability_factor', '#time_step_limit_type', '#pml_cells', '#excitation_file', '#src_steps', '#rx_steps'], 'None')
+    singlecmds = dict.fromkeys(['#domain', '#dx_dy_dz', '#time_window', '#title', '#messages', '#num_threads', '#time_step_stability_factor', '#time_step_limit_type', '#pml_cells', '#excitation_file', '#src_steps', '#rx_steps', '#taguchi', '#end_taguchi'], 'None')

    # Commands that there can be multiple instances of in a model - these will be lists within the dictionary
    multiplecmds = {key: [] for key in ['#geometry_view', '#material', '#soil_peplinski', '#add_dispersion_debye', '#add_dispersion_lorentz', '#add_dispersion_drude', '#waveform', '#voltage_source', '#hertzian_dipole', '#magnetic_dipole', '#transmission_line', '#rx', '#rx_box', '#snapshot', '#pml_cfs']}
@@ -49,6 +49,7 @@ class Material():
            ID (str): Name of the material.
            G (class): Grid class instance - holds essential parameters describing the model.
        """

        self.numID = numID
        self.ID = ID
        self.type = 'standard'
@@ -73,6 +74,7 @@ class Material():
        Args:
            G (class): Grid class instance - holds essential parameters describing the model.
        """

        HA = (m0*self.mr / G.dt) + 0.5*self.sm
        HB = (m0*self.mr / G.dt) - 0.5*self.sm
        self.DA = HB / HA
@@ -155,6 +157,7 @@ class PeplinskiSoil:
            sandpartdensity (float): Density of the sand particles in the soil (g/cm3).
            watervolfraction (float): Two numbers that specify a range for the volumetric water fraction of the soil.
        """

        self.ID = ID
        self.S = sandfraction
        self.C = clayfraction
@@ -28,6 +28,7 @@ class Rx:
            positiony (float): y-coordinate of location in model.
            positionz (float): z-coordinate of location in model.
        """

        self.ID = None
        self.outputs = []
        self.positionx = positionx
@@ -49,6 +49,7 @@ class Snapshot:
            time (int): Iteration number to take the snapshot on.
            filename (str): Filename to save to.
        """

        self.xs = xs
        self.ys = ys
        self.zs = zs
@@ -54,6 +54,7 @@ GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with gprMax. If not, see <http://www.gnu.org/licenses/>."""

    width = 65
    url = 'www.gprmax.com'
    print('\n{} {} {}'.format('*'*round((width - len(url))/2), url, '*'*round((width - len(url))/2)))
@@ -61,7 +62,6 @@ along with gprMax. If not, see <http://www.gnu.org/licenses/>."""
    print('{}'.format(gprMaxlogo.renderText('gprMax')))
    print('{} v{} {}'.format('*'*round((width - len(version))/2), (version), '*'*round((width - len(version))/2)))
    print(licenseinfo)
-    print('\n{}\n'.format('*'*(width+3)))


def update_progress(progress):
tools/plot_diffs.py (new regular file, 56 lines)
@@ -0,0 +1,56 @@
+# Copyright (C) 2015, Craig Warren
+#
+# This module is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License.
+# To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/.
+#
+# Please use the attribution at http://dx.doi.org/10.1190/1.3548506
+
+import os, argparse
+import h5py
+import numpy as np
+np.seterr(divide='ignore')
+import matplotlib.pyplot as plt
+
+"""Plots the differences (in dB) between a response and a reference response."""
+
+# Parse command line arguments
+parser = argparse.ArgumentParser(description='Plots the differences (in dB) between a response and a reference response.', usage='cd gprMax; python -m tools.plot_diffs refoutputfile outputfile')
+parser.add_argument('refoutputfile', help='name of output file including path containing reference response')
+parser.add_argument('outputfile', help='name of output file including path')
+args = parser.parse_args()
+
+# Load (from gprMax output file) the reference response
+f = h5py.File(args.refoutputfile, 'r')
+tmp = f['/rxs/rx1/']
+fieldname = list(tmp.keys())[0]
+refresp = np.array(tmp[fieldname])
+
+# Load (from gprMax output file) the response
+f = h5py.File(args.outputfile, 'r')
+tmp = f['/rxs/rx1/']
+fieldname = list(tmp.keys())[0]
+modelresp = np.array(tmp[fieldname])
+
+# Calculate differences
+diffdB = np.abs(modelresp - refresp) / np.amax(np.abs(refresp))
+diffdB = 20 * np.log10(diffdB)
+print(np.abs(np.sum(diffdB[-np.isneginf(diffdB)])) / len(diffdB[-np.isneginf(diffdB)]))
+
+# Plot differences
+fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Error [dB]'), num=args.outputfile, figsize=(20, 10), facecolor='w', edgecolor='w')
+ax.plot(diffdB, 'r', lw=2)
+ax.grid()
+plt.show()
@@ -11,12 +11,13 @@ from gprMax.exceptions import CmdInputError

moduledirectory = os.path.dirname(os.path.abspath(__file__))

-def antenna_like_GSSI_1500(x, y, z, resolution=0.001):
+def antenna_like_GSSI_1500(x, y, z, resolution=0.001, **kwargs):
    """Inserts a description of an antenna similar to the GSSI 1.5GHz antenna. Can be used with 1mm (default) or 2mm spatial resolution. The external dimensions of the antenna are 170mm x 108mm x 45mm. One output point is defined between the arms of the receiever bowtie. The bowties are aligned with the y axis so the output is the y component of the electric field.

    Args:
        x, y, z (float): Coordinates of a location in the model to insert the antenna. Coordinates are relative to the geometric centre of the antenna in the x-y plane and the bottom of the antenna skid in the z direction.
        resolution (float): Spatial resolution for the antenna model.
+        kwargs (dict): Optional variables, e.g. can be fed from an optimisation process.
    """

    # Antenna geometry properties
@@ -30,10 +31,21 @@ def antenna_like_GSSI_1500(x, y, z, resolution=0.001):
    bowtieheight = 0.014
    patchheight = 0.015

-    excitationfreq = 1.5e9 # GHz
-    # excitationfreq = 1.71e9 # Value from http://hdl.handle.net/1842/4074
-    sourceresistance = 50 # Ohms
-    # sourceresistance = 4 # Value from http://hdl.handle.net/1842/4074
+    # Unknown properties
+    if 'kwargs' in locals():
+        excitationfreq = kwargs['excitationfreq']
+        sourceresistance = kwargs['sourceresistance']
+        absorberEr = kwargs['absorberEr']
+        absorbersig = kwargs['absorbersig']
+    else:
+        excitationfreq = 1.5e9 # GHz
+        # excitationfreq = 1.71e9 # Value from http://hdl.handle.net/1842/4074
+        sourceresistance = 50 # Ohms
+        # sourceresistance = 4 # Value from http://hdl.handle.net/1842/4074
+        absorberEr = 1.7
+        # absorberEr = 1.58 # Value from http://hdl.handle.net/1842/4074
+        absorbersig = 0.59
+        # absorbersig = 0.428 # Value from http://hdl.handle.net/1842/4074

    x = x - (casesize[0] / 2)
    y = y - (casesize[1] / 2)
@@ -56,10 +68,9 @@ def antenna_like_GSSI_1500(x, y, z, resolution=0.001):
        raise CmdInputError('This antenna module can only be used with a spatial discretisation of 1mm or 2mm')

    # Material definitions
-    print('#material: 1.7 0.59 1.0 0.0 absorber')
-    # print('#material: 1.58 0.428 1.0 0.0 absorber') # Value from http://hdl.handle.net/1842/4074
-    print('#material: 3.0 0.0 1.0 0.0 pcb')
-    print('#material: 2.35 0.0 1.0 0.0 hdpe')
+    print('#material: {:.2f} {:.3f} 1 0 absorber'.format(absorberEr, absorbersig))
+    print('#material: 3 0 1 0 pcb')
+    print('#material: 2.35 0 1 0 hdpe')

    # Antenna geometry
    # Plastic case
@@ -147,12 +158,13 @@ def antenna_like_GSSI_1500(x, y, z, resolution=0.001):



-def antenna_like_MALA_1200(x, y, z, resolution=0.001):
+def antenna_like_MALA_1200(x, y, z, resolution=0.001, **kwargs):
    """Inserts a description of an antenna similar to the MALA 1.2GHz antenna. Can be used with 1mm (default) or 2mm spatial resolution. The external dimensions of the antenna are 184mm x 109mm x 46mm. One output point is defined between the arms of the receiever bowtie. The bowties are aligned with the y axis so the output is the y component of the electric field.

    Args:
        x, y, z (float): Coordinates of a location in the model to insert the antenna. Coordinates are relative to the geometric centre of the antenna in the x-y plane and the bottom of the antenna skid in the z direction.
        resolution (float): Spatial resolution for the antenna model.
+        kwargs (dict): Optional variables, e.g. can be fed from an optimisation process.
    """

    # Antenna geometry properties
@@ -166,8 +178,17 @@ def antenna_like_MALA_1200(x, y, z, resolution=0.001):
    skidthickness = 0.006
    bowtieheight = 0.025

-    excitationfreq = 0.978e9 # GHz
-    sourceresistance = 1000 # Ohms
+    # Unknown properties
+    if 'kwargs' in locals():
+        excitationfreq = kwargs['excitationfreq']
+        sourceresistance = kwargs['sourceresistance']
+        absorberEr = kwargs['absorberEr']
+        absorbersig = kwargs['absorbersig']
+    else:
+        excitationfreq = 0.978e9 # GHz
+        sourceresistance = 1000 # Ohms
+        absorberEr = 6.49
+        absorbersig = 0.252

    x = x - (casesize[0] / 2)
    y = y - (casesize[1] / 2)
@@ -205,14 +226,14 @@ def antenna_like_MALA_1200(x, y, z, resolution=0.001):
    rxsiglower = ((1 / rxrescelllower) * (dy / (dx * dz))) / 2 # Divide by number of parallel edges per resistor

    # Material definitions
-    print('#material: 6.49 0.252 1.0 0.0 absorber')
-    print('#material: 3.0 0.0 1.0 0.0 pcb')
-    print('#material: 2.35 0.0 1.0 0.0 hdpe')
-    print('#material: 2.26 0.0 1.0 0.0 polypropylene')
-    print('#material: 3.0 {:.3f} 1.0 0.0 txreslower'.format(txsiglower))
-    print('#material: 3.0 {:.3f} 1.0 0.0 txresupper'.format(txsigupper))
-    print('#material: 3.0 {:.3f} 1.0 0.0 rxreslower'.format(rxsiglower))
-    print('#material: 3.0 {:.3f} 1.0 0.0 rxresupper'.format(rxsigupper))
+    print('#material: {:.2f} {:.3f} 1 0 absorber'.format(absorberEr, absorbersig))
+    print('#material: 3 0 1 0 pcb')
+    print('#material: 2.35 0 1 0 hdpe')
+    print('#material: 2.26 0 1 0 polypropylene')
+    print('#material: 3 {:.3f} 1 0 txreslower'.format(txsiglower))
+    print('#material: 3 {:.3f} 1 0 txresupper'.format(txsigupper))
+    print('#material: 3 {:.3f} 1 0 rxreslower'.format(rxsiglower))
+    print('#material: 3 {:.3f} 1 0 rxresupper'.format(rxsigupper))

    # Antenna geometry
    # Shield - metallic enclosure
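The antenna builders above now accept optional keyword arguments, and the Taguchi example input file simply unpacks optparams into the call. Below is a standalone sketch of that calling pattern; the function name and defaults are illustrative only, and dict.get is used here just to keep the sketch self-contained (the commit itself tests 'kwargs' in locals()):

def antenna_sketch(x, y, z, resolution=0.001, **kwargs):
    # Optimised values override the built-in defaults; geometry handling is omitted in this sketch
    excitationfreq = kwargs.get('excitationfreq', 1.5e9)    # Hz
    sourceresistance = kwargs.get('sourceresistance', 50)   # Ohms
    absorberEr = kwargs.get('absorberEr', 1.7)
    absorbersig = kwargs.get('absorbersig', 0.59)
    print('#material: {:.2f} {:.3f} 1 0 absorber'.format(absorberEr, absorbersig))

# Called the same way the example input file calls antenna_like_MALA_1200(..., **optparams)
antenna_sketch(0.132, 0.095, 0.100, resolution=0.001,
               excitationfreq=0.978e9, sourceresistance=1000,
               absorberEr=6.49, absorbersig=0.252)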
Binary file not shown.
Binary file not shown.
user_libs/optimisations/taguchi.py (new regular file, 250 lines)
@@ -0,0 +1,250 @@
+# Copyright (C) 2015, Craig Warren
+#
+# This module is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License.
+# To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/.
+#
+# Please use the attribution at http://dx.doi.org/10.1190/1.3548506
+
+import os
+from collections import OrderedDict
+
+import numpy as np
+import h5py
+
+from gprMax.constants import floattype
+from gprMax.exceptions import CmdInputError
+
+moduledirectory = os.path.dirname(os.path.abspath(__file__))
+
+
+def taguchi_code_blocks(inputfile, taguchinamespace):
+    """Looks for and processes a Taguchi code block (containing Python code) in the input file. It will ignore any lines that are comments, i.e. begin with a double hash (##), and any blank lines.
+
+    Args:
+        inputfile (str): Name of the input file to open.
+        taguchinamespace (dict): Namespace that can be accessed by user a Taguchi code block in input file.
+
+    Returns:
+        processedlines (list): Input commands after Python processing.
+    """
+
+    with open(inputfile, 'r') as f:
+        # Strip out any newline characters and comments that must begin with double hashes
+        inputlines = [line.rstrip() for line in f if(not line.startswith('##') and line.rstrip('\n'))]
+
+    x = 0
+    while(x < len(inputlines)):
+        if(inputlines[x].startswith('#taguchi:')):
+            # String to hold Python code to be executed
+            taguchicode = ''
+            x += 1
+            while not inputlines[x].startswith('#end_taguchi:'):
+                # Add all code in current code block to string
+                taguchicode += inputlines[x] + '\n'
+                x += 1
+                if x == len(inputlines):
+                    raise CmdInputError('Cannot find the end of the Taguchi code block, i.e. missing #end_taguchi: command.')
+
+            # Compile code for faster execution
+            taguchicompiledcode = compile(taguchicode, '<string>', 'exec')
+
+            # Execute code block & make available only usernamespace
+            exec(taguchicompiledcode, taguchinamespace)
+
+        x += 1
+
+    return taguchinamespace
+
+
+def select_OA(optparams):
+    """Load an orthogonal array (OA) from a numpy file. Configure and return OA and properties of OA.
+
+    Args:
+        optparams (dict): Dictionary containing name of parameters to optimise and their initial ranges
+
+    Returns:
+        OA (array): Orthogonal array
+        N (int): Number of experiments in OA
+        k (int): Number of parameters to optimise in OA
+        s (int): Number of levels in OA
+        t (int): Strength of OA
+    """
+
+    # Load the appropriate OA
+    if len(optparams) <= 4:
+        OA = np.load(os.path.join(moduledirectory, 'OA_9_4_3_2.npy'))
+    elif len(optparams) <= 7:
+        OA = np.load(os.path.join(moduledirectory, 'OA_18_7_3_2.npy'))
+    else:
+        raise CmdInputError('Too many parameters to optimise for the available orthogonal arrays (OA). Please find and load a bigger, suitable OA.')
+
+    # Cut down OA columns to number of parameters to optimise
+    OA = OA[:, 0:len(optparams)]
+
+    # Number of experiments
+    N = OA.shape[0]
+
+    # Number of parameters to optimise
+    k = OA.shape[1]
+
+    # Number of levels
+    s = 3
+
+    # Strength
+    t = 2
+
+    return OA, N, k, s
+
+
+def calculate_ranges_experiments(optparams, optparamsinit, levels, levelsopt, levelsdiff, OA, N, k, s, i):
+    """Calculate values for parameters to optimise for a set of experiments.
+
+    Args:
+        optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
+        optparamsinit (list): Initial ranges for parameters to optimise
+        levels (array): Lower, central, and upper values for each parameter
+        levelsopt (array): Optimal level for each parameter from previous iteration
+        levelsdiff (array): Difference used to set values in levels array
+        OA (array): Orthogonal array
+        N (int): Number of experiments in OA
+        k (int): Number of parameters to optimise in OA
+        s (int): Number of levels in OA
+        i (int): Iteration number
+
+    Returns:
+        optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
+        levels (array): Lower, central, and upper values for each parameter
+        levelsdiff (array): Difference used to set values in levels array
+    """
+
+    # Reducing function used for calculating levels
+    RR = np.exp(-(i/18)**2)
+
+    # Calculate levels for each parameter
+    for p in range(0, k):
+        # Central levels - for first iteration set to midpoint of initial range and don't use RR
+        if i == 0:
+            levels[1, p] = ((optparamsinit[p][1][1] - optparamsinit[p][1][0]) / 2) + optparamsinit[p][1][0]
+            levelsdiff[p] = (optparamsinit[p][1][1] - optparamsinit[p][1][0]) / (s + 1)
+        # Central levels - set to optimum from previous iteration
+        else:
+            levels[1, p] = levels[levelsopt[p], p]
+            levelsdiff[p] = RR * levelsdiff[p]
+
+        # Lower levels set using central level and level differences values; and check they are not outwith initial ranges
+        if levels[1, p] - levelsdiff[p] < optparamsinit[p][1][0]:
+            levels[0, p] = optparamsinit[p][1][0]
+        else:
+            levels[0, p] = levels[1, p] - levelsdiff[p]
+
+        # Upper levels set using central level and level differences values; and check they are not outwith initial ranges
+        if levels[1, p] + levelsdiff[p] > optparamsinit[p][1][1]:
+            levels[2, p] = optparamsinit[p][1][1]
+        else:
+            levels[2, p] = levels[1, p] + levelsdiff[p]
+
+    # Update dictionary of parameters to optimise with lists of new values; clear dictionary first
+    optparams = OrderedDict((key, list()) for key in optparams)
+    p = 0
+    for key, value in optparams.items():
+        for exp in range(0, N):
+            if OA[exp, p] == 0:
+                optparams[key].append(levels[0, p])
+            elif OA[exp, p] == 1:
+                optparams[key].append(levels[1, p])
+            elif OA[exp, p] == 2:
+                optparams[key].append(levels[2, p])
+        p += 1
+
+    return optparams, levels, levelsdiff
+
+
+def calculate_optimal_levels(optparams, levels, levelsopt, fitnessvalues, OA, N, k):
+    """Calculate optimal levels from results of fitness metric by building a response table.
+
+    Args:
+        optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
+        levels (array): Lower, central, and upper values for each parameter
+        levelsopt (array): Optimal level for each parameter from previous iteration
+        fitnessvalues (list): Values from results of fitness metric
+        OA (array): Orthogonal array
+        N (int): Number of experiments in OA
+        k (int): Number of parameters to optimise in OA
+
+    Returns:
+        optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
+        levelsopt (array): Optimal level for each parameter from previous iteration
+    """
+
+    # Build a table of responses based on the results of the fitness metric
+    for p in range(0, k):
+        responses = np.zeros(3, dtype=floattype)
+
+        cnt1 = 0
+        cnt2 = 0
+        cnt3 = 0
+
+        for exp in range(1, N):
+            if OA[exp, p] == 0:
+                responses[0] += fitnessvalues[exp]
+                cnt1 += 1
+            elif OA[exp, p] == 1:
+                responses[1] += fitnessvalues[exp]
+                cnt2 += 1
+            elif OA[exp, p] == 2:
+                responses[2] += fitnessvalues[exp]
+                cnt3 += 1
+
+        responses[0] /= cnt1
+        responses[1] /= cnt2
+        responses[2] /= cnt3
+
+        # Calculate optimal level from table of responses
+        tmp = np.where(responses == np.amax(responses))[0]
+
+        # If there is more than one level found use the first
+        if len(tmp) > 1:
+            tmp = tmp[0]
+
+        levelsopt[p] = tmp
+
+    # Update dictionary of parameters to optimise with lists of new values; clear dictionary first
+    optparams = OrderedDict((key, list()) for key in optparams)
+    p = 0
+    for key, value in optparams.items():
+        optparams[key].append(levels[levelsopt[p], p])
+        p += 1
+
+    return optparams, levelsopt
+
+
+def plot_optimisation_history(fitnessvalueshist, optparamshist, optparamsinit):
+    """Plot the history of fitness values and each optimised parameter values for the optimisation.
+
+    Args:
+        fitnessvalueshist (list): History of fitness values
+        optparamshist (dict): Name of parameters to optimise and history of their values
+    """
+
+    import matplotlib.pyplot as plt
+
+    # Plot history of fitness values
+    fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Fitness value'), num='History of fitness values', figsize=(20, 10), facecolor='w', edgecolor='w')
+    iterations = np.arange(1, len(fitnessvalueshist) + 1)
+    ax.plot(iterations, fitnessvalueshist, 'r', marker='.', ms=15, lw=1)
+    ax.set_xlim(1, len(fitnessvalueshist) + 1)
+    ax.grid()
+
+    # Plot history of optimisation parameters
+    p = 0
+    for key, value in optparamshist.items():
+        fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Parameter value'), num='History of ' + key + ' parameter', figsize=(20, 10), facecolor='w', edgecolor='w')
+        ax.plot(iterations, optparamshist[key], 'r', marker='.', ms=15, lw=1)
+        ax.set_xlim(1, len(value) + 1)
+        ax.set_ylim(optparamsinit[p][1][0], optparamsinit[p][1][1])
+        ax.grid()
+        p += 1
+    plt.show()
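A standalone toy illustration of the response-table step implemented in calculate_optimal_levels above: for every parameter, average the fitness values obtained at each of its three levels and keep the level with the highest average. The orthogonal array and fitness values below are made up for the illustration (the shipped arrays live in the OA_*.npy files):

import numpy as np

# Toy 3-level array: rows are experiments, columns are parameters,
# entries are the level index used (0 lower, 1 central, 2 upper)
OA = np.array([[0, 0], [0, 1], [0, 2],
               [1, 0], [1, 1], [1, 2],
               [2, 0], [2, 1], [2, 2]])
fitnessvalues = np.array([1.0, 2.0, 3.0, 2.5, 3.5, 4.5, 2.0, 3.0, 4.0])

levelsopt = np.zeros(OA.shape[1], dtype=int)
for p in range(OA.shape[1]):
    # Mean fitness per level of parameter p: one column of the response table
    responses = [fitnessvalues[OA[:, p] == level].mean() for level in range(3)]
    levelsopt[p] = int(np.argmax(responses))

print(levelsopt)  # [1 2]: central level wins for the first parameter, upper for the second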
@@ -0,0 +1,163 @@
+# Copyright (C) 2015, Craig Warren
+#
+# This module is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License.
+# To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/.
+#
+# Please use the attribution at http://dx.doi.org/10.1190/1.3548506
+
+import h5py
+import numpy as np
+np.seterr(divide='ignore')
+from scipy import signal
+
+"""This module contains fitness metric functions that can be used with the Taguchi optimisation method.
+
+All fitness functions must take two arguments and return a single fitness value.
+The first argument should be the name of the output file
+The second argument is a list which can contain any number of additional arguments, e.g. names (IDs) of outputs (rxs) from input file
+"""
+
+
+def fitness_max(filename, args):
+    """Maximum value from a response.
+
+    Args:
+        filename (str): Name of output file
+        args (dict): 'outputs' key with a list of names (IDs) of outputs (rxs) from input file
+
+    Returns:
+        maxvalue (float): Maximum value from specific outputs
+    """
+
+    f = h5py.File(filename, 'r')
+    nrx = f.attrs['nrx']
+
+    for rx in range(1, nrx + 1):
+        tmp = f['/rxs/rx' + str(rx) + '/']
+        if tmp.attrs['Name'] in args['outputs']:
+            fieldname = list(tmp.keys())[0]
+            maxvalue = np.amax(tmp[fieldname])
+
+    return maxvalue
+
+
+def fitness_xcorr(filename, args):
+    """Maximum value of a cross-correlation between a response and a reference response.
+
+    Args:
+        filename (str): Name of output file
+        args (dict): 'refresp' key with path & filename of reference response (time, amp) stored in a text file; 'outputs' key with a list of names (IDs) of outputs (rxs) from input file
+
+    Returns:
+        xcorrmax (float): Maximum value from specific outputs
+    """
+
+    # Load (from text file) and normalise the reference response
+    with open(args['refresp'], 'r') as f:
+        refdata = np.loadtxt(f)
+    reftime = refdata[:,0] * 1e-9
+    refresp = refdata[:,1]
+    refresp /= np.amax(np.abs(refresp))
+
+    # Load response from output file
+    f = h5py.File(filename, 'r')
+    nrx = f.attrs['nrx']
+    modeltime = np.arange(0, f.attrs['dt'] * f.attrs['Iterations'], f.attrs['dt'])
+
+    for rx in range(1, nrx + 1):
+        tmp = f['/rxs/rx' + str(rx) + '/']
+        if tmp.attrs['Name'] in args['outputs']:
+            fieldname = list(tmp.keys())[0]
+            modelresp = tmp[fieldname]
+            # Convert field value (V/m) to voltage
+            if fieldname == 'Ex':
+                modelresp *= -1 * f.attrs['dx, dy, dz'][0]
+            elif fieldname == 'Ey':
+                modelresp *= -1 * f.attrs['dx, dy, dz'][1]
+            if fieldname == 'Ez':
+                modelresp *= -1 * f.attrs['dx, dy, dz'][2]
+
+            # Normalise respose from output file
+            modelresp /= np.amax(np.abs(modelresp))
+
+    # Make both responses the same length in time
+    if reftime[-1] > modeltime[-1]:
+        reftime = np.arange(0, f.attrs['dt'] * f.attrs['Iterations'], reftime[-1] / len(reftime))
+        refresp = refresp[0:len(reftime)]
+    elif modeltime[-1] > reftime[-1]:
+        modeltime = np.arange(0, reftime[-1], f.attrs['dt'])
+        modelresp = modelresp[0:len(modeltime)]
+
+    # Downsample the response with the higher sampling rate
+    if len(modeltime) < len(reftime):
+        refresp = signal.resample(refresp, len(modelresp))
+    elif len(reftime) < len(modeltime):
+        modelresp = signal.resample(modelresp, len(refresp))
+
+    # Plots responses for checking
+    # fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Voltage [V]'), figsize=(20, 10), facecolor='w', edgecolor='w')
+    # ax.plot(refresp,'r', lw=2, label='refresp')
+    # ax.plot(modelresp,'b', lw=2, label='modelresp')
+    # ax.grid()
+    # plt.show()
+
+    # Calculate cross-correlation
+    xcorr = signal.correlate(refresp, modelresp)
+    # Plot cross-correlation for checking
+    # fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Voltage [V]'), figsize=(20, 10), facecolor='w', edgecolor='w')
+    # ax.plot(xcorr,'r', lw=2, label='xcorr')
+    # ax.grid()
+    # plt.show()
+    xcorrmax = np.amax(xcorr) / 100
+
+    return xcorrmax
+
+
+def fitness_diffs(filename, args):
+    """Sum of the differences (in dB) between responses and a reference response.
+
+    Args:
+        filename (str): Name of output file
+        args (dict): 'refresp' key with path & filename of reference response; 'outputs' key with a list of names (IDs) of outputs (rxs) from input file
+
+    Returns:
+        diffdB (float): Sum of the differences (in dB) between responses and a reference response
+    """
+
+    # Load (from gprMax output file) the reference response
+    f = h5py.File(args['refresp'], 'r')
+    tmp = f['/rxs/rx1/']
+    fieldname = list(tmp.keys())[0]
+    refresp = np.array(tmp[fieldname])
+
+    # Load (from gprMax output file) the response
+    f = h5py.File(filename, 'r')
+    nrx = f.attrs['nrx']
+
+    diffdB = 0
+    outputs = 0
+    for rx in range(1, nrx + 1):
+        tmp = f['/rxs/rx' + str(rx) + '/']
+        if tmp.attrs['Name'] in args['outputs']:
+            fieldname = list(tmp.keys())[0]
+            modelresp = np.array(tmp[fieldname])
+            # Calculate sum of differences
+            tmp = 20 * np.log10(np.abs(modelresp - refresp) / np.amax(np.abs(refresp)))
+            tmp = np.abs(np.sum(tmp[-np.isneginf(tmp)])) / len(tmp[-np.isneginf(tmp)])
+            diffdB += tmp
+            outputs += 1
+
+    return diffdB / outputs
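According to the module docstring above, a fitness metric is any function that takes the output-file name and an args dict and returns a single value, so users can add their own alongside fitness_max, fitness_xcorr and fitness_diffs. A minimal skeleton following that contract is sketched below; the name fitness_energy is hypothetical, and the HDF5 layout ('nrx', '/rxs/rxN/', 'Name') mirrors the functions above:

import h5py
import numpy as np

def fitness_energy(filename, args):
    """Hypothetical metric: summed squared amplitude of the named outputs."""
    energy = 0.0
    with h5py.File(filename, 'r') as f:
        nrx = f.attrs['nrx']
        for rx in range(1, nrx + 1):
            tmp = f['/rxs/rx' + str(rx) + '/']
            if tmp.attrs['Name'] in args['outputs']:
                fieldname = list(tmp.keys())[0]
                energy += float(np.sum(np.array(tmp[fieldname])**2))
    return energy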
File diff too large to display.
@@ -0,0 +1,20 @@
+#title: MALA 1.2GHz 'like' antenna in free-space
+#domain: 0.264 0.189 0.220
+#dx_dy_dz: 0.001 0.001 0.001
+#time_window: 6e-9
+
+#taguchi:
+## Dictionary containing name of parameters to optimise and their values
+optparams['excitationfreq'] = [0.8e9, 2.5e9]
+optparams['sourceresistance'] = [1, 10000]
+optparams['absorberEr'] = [1, 50]
+optparams['absorbersig'] = [0.01, 1]
+
+## Dictionary containing name of fitness metric to use, stopping criterion, and names of associated outputs (should correspond to names of rxs in input file)
+fitness = {'name': 'fitness_xcorr', 'stop': 0.98, 'args': {'refresp': inputdirectory + 'antenna_MALA_1200_fs_real.txt', 'outputs': 'rxMALA1200'}}
+#end_taguchi:
+
+#python:
+from user_libs.antennas import antenna_like_MALA_1200
+antenna_like_MALA_1200(0.132, 0.095, 0.100, resolution=0.001, **optparams)
+#end_python:
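To make the #taguchi: block above concrete, here is a standalone sketch of what taguchi_code_blocks effectively does with it: the block's statements are exec'd in a namespace that has been pre-seeded with an empty OrderedDict named optparams (the inputdirectory value below is a placeholder path):

from collections import OrderedDict

taguchinamespace = {'optparams': OrderedDict(), 'inputdirectory': './'}

taguchicode = (
    "optparams['excitationfreq'] = [0.8e9, 2.5e9]\n"
    "optparams['sourceresistance'] = [1, 10000]\n"
    "fitness = {'name': 'fitness_xcorr', 'stop': 0.98,\n"
    "           'args': {'refresp': inputdirectory + 'antenna_MALA_1200_fs_real.txt',\n"
    "                    'outputs': 'rxMALA1200'}}\n"
)
exec(compile(taguchicode, '<string>', 'exec'), taguchinamespace)

print(taguchinamespace['optparams'])   # parameter names with their initial ranges
print(taguchinamespace['fitness'])     # fitness metric, stopping criterion and outputs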
File diff too large to display.
@@ -0,0 +1,28 @@
+#title: A-scan from a metal cylinder buried in a dielectric half-space
+#domain: 0.240 0.190 0.002
+#dx_dy_dz: 0.002 0.002 0.002
+#time_window: 3e-9
+#time_step_limit_type: 2D
+#pml_cells: 10 10 0 10 10 0
+
+#material: 6 0 1 0 half_space
+
+#taguchi:
+## Dictionary containing name of parameters to optimise and their values
+optparams['rickeramp'] = [0.25, 5]
+
+## Dictionary containing name of fitness metric to use, stopping criterion, and names of associated outputs (should correspond to names of rxs in input file)
+fitness = {'name': 'fitness_max', 'stop': 4000, 'args': {'outputs': 'myRx'}}
+#end_taguchi:
+
+#python:
+print('#waveform: ricker {} 1.5e9 my_ricker'.format(optparams['rickeramp']))
+#end_python:
+
+#hertzian_dipole: z 0.100 0.170 0 my_ricker
+#rx: 0.140 0.170 0 myRx Ez
+
+#box: 0 0 0 0.240 0.170 0.002 half_space
+#cylinder: 0.120 0.080 0 0.120 0.080 0.002 0.010 pec
+
+geometry_view: 0 0 0 0.240 0.190 0.002 0.002 0.002 0.002 cylinder_half_space n