Mirrored from https://gitee.com/sunhf/gprMax.git
Synced 2025-08-06 20:46:52 +08:00
Merge branch 'optimisation-taguchi'
@@ -11,12 +11,13 @@ from gprMax.exceptions import CmdInputError
 
 moduledirectory = os.path.dirname(os.path.abspath(__file__))
 
 
-def antenna_like_GSSI_1500(x, y, z, resolution=0.001):
+def antenna_like_GSSI_1500(x, y, z, resolution=0.001, **kwargs):
     """Inserts a description of an antenna similar to the GSSI 1.5GHz antenna. Can be used with 1mm (default) or 2mm spatial resolution. The external dimensions of the antenna are 170mm x 108mm x 45mm. One output point is defined between the arms of the receiver bowtie. The bowties are aligned with the y axis so the output is the y component of the electric field.
 
     Args:
         x, y, z (float): Coordinates of a location in the model to insert the antenna. Coordinates are relative to the geometric centre of the antenna in the x-y plane and the bottom of the antenna skid in the z direction.
         resolution (float): Spatial resolution for the antenna model.
+        kwargs (dict): Optional variables, e.g. can be fed from an optimisation process.
     """
 
     # Antenna geometry properties
@@ -30,10 +31,21 @@ def antenna_like_GSSI_1500(x, y, z, resolution=0.001):
     bowtieheight = 0.014
     patchheight = 0.015
 
-    excitationfreq = 1.5e9 # GHz
-    # excitationfreq = 1.71e9 # Value from http://hdl.handle.net/1842/4074
-    sourceresistance = 50 # Ohms
-    # sourceresistance = 4 # Value from http://hdl.handle.net/1842/4074
+    # Unknown properties
+    if 'kwargs' in locals():
+        excitationfreq = kwargs['excitationfreq']
+        sourceresistance = kwargs['sourceresistance']
+        absorberEr = kwargs['absorberEr']
+        absorbersig = kwargs['absorbersig']
+    else:
+        excitationfreq = 1.5e9 # GHz
+        # excitationfreq = 1.71e9 # Value from http://hdl.handle.net/1842/4074
+        sourceresistance = 50 # Ohms
+        # sourceresistance = 4 # Value from http://hdl.handle.net/1842/4074
+        absorberEr = 1.7
+        # absorberEr = 1.58 # Value from http://hdl.handle.net/1842/4074
+        absorbersig = 0.59
+        # absorbersig = 0.428 # Value from http://hdl.handle.net/1842/4074
 
     x = x - (casesize[0] / 2)
     y = y - (casesize[1] / 2)
@@ -56,10 +68,9 @@ def antenna_like_GSSI_1500(x, y, z, resolution=0.001):
         raise CmdInputError('This antenna module can only be used with a spatial discretisation of 1mm or 2mm')
 
     # Material definitions
-    print('#material: 1.7 0.59 1.0 0.0 absorber')
-    # print('#material: 1.58 0.428 1.0 0.0 absorber') # Value from http://hdl.handle.net/1842/4074
-    print('#material: 3.0 0.0 1.0 0.0 pcb')
-    print('#material: 2.35 0.0 1.0 0.0 hdpe')
+    print('#material: {:.2f} {:.3f} 1 0 absorber'.format(absorberEr, absorbersig))
+    print('#material: 3 0 1 0 pcb')
+    print('#material: 2.35 0 1 0 hdpe')
 
     # Antenna geometry
     # Plastic case
@@ -147,12 +158,13 @@ def antenna_like_GSSI_1500(x, y, z, resolution=0.001):
 
 
 
-def antenna_like_MALA_1200(x, y, z, resolution=0.001):
+def antenna_like_MALA_1200(x, y, z, resolution=0.001, **kwargs):
     """Inserts a description of an antenna similar to the MALA 1.2GHz antenna. Can be used with 1mm (default) or 2mm spatial resolution. The external dimensions of the antenna are 184mm x 109mm x 46mm. One output point is defined between the arms of the receiver bowtie. The bowties are aligned with the y axis so the output is the y component of the electric field.
 
     Args:
         x, y, z (float): Coordinates of a location in the model to insert the antenna. Coordinates are relative to the geometric centre of the antenna in the x-y plane and the bottom of the antenna skid in the z direction.
         resolution (float): Spatial resolution for the antenna model.
+        kwargs (dict): Optional variables, e.g. can be fed from an optimisation process.
     """
 
     # Antenna geometry properties
@@ -166,8 +178,17 @@ def antenna_like_MALA_1200(x, y, z, resolution=0.001):
     skidthickness = 0.006
     bowtieheight = 0.025
 
-    excitationfreq = 0.978e9 # GHz
-    sourceresistance = 1000 # Ohms
+    # Unknown properties
+    if 'kwargs' in locals():
+        excitationfreq = kwargs['excitationfreq']
+        sourceresistance = kwargs['sourceresistance']
+        absorberEr = kwargs['absorberEr']
+        absorbersig = kwargs['absorbersig']
+    else:
+        excitationfreq = 0.978e9 # GHz
+        sourceresistance = 1000 # Ohms
+        absorberEr = 6.49
+        absorbersig = 0.252
 
     x = x - (casesize[0] / 2)
     y = y - (casesize[1] / 2)
@@ -205,14 +226,14 @@ def antenna_like_MALA_1200(x, y, z, resolution=0.001):
     rxsiglower = ((1 / rxrescelllower) * (dy / (dx * dz))) / 2 # Divide by number of parallel edges per resistor
 
     # Material definitions
-    print('#material: 6.49 0.252 1.0 0.0 absorber')
-    print('#material: 3.0 0.0 1.0 0.0 pcb')
-    print('#material: 2.35 0.0 1.0 0.0 hdpe')
-    print('#material: 2.26 0.0 1.0 0.0 polypropylene')
-    print('#material: 3.0 {:.3f} 1.0 0.0 txreslower'.format(txsiglower))
-    print('#material: 3.0 {:.3f} 1.0 0.0 txresupper'.format(txsigupper))
-    print('#material: 3.0 {:.3f} 1.0 0.0 rxreslower'.format(rxsiglower))
-    print('#material: 3.0 {:.3f} 1.0 0.0 rxresupper'.format(rxsigupper))
+    print('#material: {:.2f} {:.3f} 1 0 absorber'.format(absorberEr, absorbersig))
+    print('#material: 3 0 1 0 pcb')
+    print('#material: 2.35 0 1 0 hdpe')
+    print('#material: 2.26 0 1 0 polypropylene')
+    print('#material: 3 {:.3f} 1 0 txreslower'.format(txsiglower))
+    print('#material: 3 {:.3f} 1 0 txresupper'.format(txsigupper))
+    print('#material: 3 {:.3f} 1 0 rxreslower'.format(rxsiglower))
+    print('#material: 3 {:.3f} 1 0 rxresupper'.format(rxsigupper))
 
     # Antenna geometry
     # Shield - metallic enclosure
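For orientation, a minimal usage sketch (not part of the commit) of how the new **kwargs hook could be driven from an optimisation run. The import path and the coordinate values are illustrative assumptions, since the antenna module's filename is not shown in this diff; only the four keyword names are taken from the diff above.

# Hypothetical call; the module path is an assumption.
from user_libs.antennas import antenna_like_GSSI_1500

antenna_like_GSSI_1500(0.125, 0.094, 0.100, resolution=0.001,
                       excitationfreq=1.5e9, sourceresistance=50,
                       absorberEr=1.7, absorbersig=0.59)
# Note: with **kwargs in the signature the name 'kwargs' is always bound, so the
# "if 'kwargs' in locals()" branch above always runs and expects all four keys.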
Binary file not shown.
Binary file not shown.

user_libs/optimisations/taguchi.py (new regular file, 250 lines)
@@ -0,0 +1,250 @@
# Copyright (C) 2015, Craig Warren
#
# This module is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/.
#
# Please use the attribution at http://dx.doi.org/10.1190/1.3548506

import os
from collections import OrderedDict

import numpy as np
import h5py

from gprMax.constants import floattype
from gprMax.exceptions import CmdInputError

moduledirectory = os.path.dirname(os.path.abspath(__file__))


def taguchi_code_blocks(inputfile, taguchinamespace):
    """Looks for and processes a Taguchi code block (containing Python code) in the input file. It will ignore any lines that are comments, i.e. begin with a double hash (##), and any blank lines.

    Args:
        inputfile (str): Name of the input file to open.
        taguchinamespace (dict): Namespace that can be accessed by a Taguchi code block in the input file.

    Returns:
        processedlines (list): Input commands after Python processing.
    """

    with open(inputfile, 'r') as f:
        # Strip out any newline characters and comments that must begin with double hashes
        inputlines = [line.rstrip() for line in f if(not line.startswith('##') and line.rstrip('\n'))]

    x = 0
    while(x < len(inputlines)):
        if(inputlines[x].startswith('#taguchi:')):
            # String to hold Python code to be executed
            taguchicode = ''
            x += 1
            while not inputlines[x].startswith('#end_taguchi:'):
                # Add all code in current code block to string
                taguchicode += inputlines[x] + '\n'
                x += 1
                if x == len(inputlines):
                    raise CmdInputError('Cannot find the end of the Taguchi code block, i.e. missing #end_taguchi: command.')

            # Compile code for faster execution
            taguchicompiledcode = compile(taguchicode, '<string>', 'exec')

            # Execute code block & make it available only in the Taguchi namespace
            exec(taguchicompiledcode, taguchinamespace)

        x += 1

    return taguchinamespace

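For context, a minimal sketch of the kind of block that taguchi_code_blocks extracts and executes. The parameter names and ranges are hypothetical, the [min, max] list form is inferred from how calculate_ranges_experiments indexes optparamsinit further down, and it assumes the namespace passed in already provides an optparams dictionary.

#taguchi:
## Lines starting with a double hash are stripped before the block is executed
optparams['absorberEr'] = [1.0, 10.0]    # hypothetical parameter and initial range
optparams['absorbersig'] = [0.1, 1.0]    # hypothetical parameter and initial range
#end_taguchi: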

def select_OA(optparams):
    """Load an orthogonal array (OA) from a numpy file. Configure and return OA and properties of OA.

    Args:
        optparams (dict): Dictionary containing name of parameters to optimise and their initial ranges

    Returns:
        OA (array): Orthogonal array
        N (int): Number of experiments in OA
        k (int): Number of parameters to optimise in OA
        s (int): Number of levels in OA
        t (int): Strength of OA
    """

    # Load the appropriate OA
    if len(optparams) <= 4:
        OA = np.load(os.path.join(moduledirectory, 'OA_9_4_3_2.npy'))
    elif len(optparams) <= 7:
        OA = np.load(os.path.join(moduledirectory, 'OA_18_7_3_2.npy'))
    else:
        raise CmdInputError('Too many parameters to optimise for the available orthogonal arrays (OA). Please find and load a bigger, suitable OA.')

    # Cut down OA columns to number of parameters to optimise
    OA = OA[:, 0:len(optparams)]

    # Number of experiments
    N = OA.shape[0]

    # Number of parameters to optimise
    k = OA.shape[1]

    # Number of levels
    s = 3

    # Strength
    t = 2

    return OA, N, k, s

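A brief usage sketch, under the same assumption that optparams maps parameter names to [min, max] ranges: with four or fewer parameters the 9-experiment OA_9_4_3_2 array is loaded and cut down to k columns.

# Illustrative values only
optparams = OrderedDict([('absorberEr', [1.0, 10.0]), ('absorbersig', [0.1, 1.0])])
OA, N, k, s = select_OA(optparams)    # expect N == 9, k == 2, s == 3 for the 9-run OA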

def calculate_ranges_experiments(optparams, optparamsinit, levels, levelsopt, levelsdiff, OA, N, k, s, i):
    """Calculate values for parameters to optimise for a set of experiments.

    Args:
        optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
        optparamsinit (list): Initial ranges for parameters to optimise
        levels (array): Lower, central, and upper values for each parameter
        levelsopt (array): Optimal level for each parameter from previous iteration
        levelsdiff (array): Difference used to set values in levels array
        OA (array): Orthogonal array
        N (int): Number of experiments in OA
        k (int): Number of parameters to optimise in OA
        s (int): Number of levels in OA
        i (int): Iteration number

    Returns:
        optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
        levels (array): Lower, central, and upper values for each parameter
        levelsdiff (array): Difference used to set values in levels array
    """

    # Reducing function used for calculating levels
    RR = np.exp(-(i/18)**2)

    # Calculate levels for each parameter
    for p in range(0, k):
        # Central levels - for first iteration set to midpoint of initial range and don't use RR
        if i == 0:
            levels[1, p] = ((optparamsinit[p][1][1] - optparamsinit[p][1][0]) / 2) + optparamsinit[p][1][0]
            levelsdiff[p] = (optparamsinit[p][1][1] - optparamsinit[p][1][0]) / (s + 1)
        # Central levels - set to optimum from previous iteration
        else:
            levels[1, p] = levels[levelsopt[p], p]
            levelsdiff[p] = RR * levelsdiff[p]

        # Lower levels set using central level and level differences values; and check they are not outwith initial ranges
        if levels[1, p] - levelsdiff[p] < optparamsinit[p][1][0]:
            levels[0, p] = optparamsinit[p][1][0]
        else:
            levels[0, p] = levels[1, p] - levelsdiff[p]

        # Upper levels set using central level and level differences values; and check they are not outwith initial ranges
        if levels[1, p] + levelsdiff[p] > optparamsinit[p][1][1]:
            levels[2, p] = optparamsinit[p][1][1]
        else:
            levels[2, p] = levels[1, p] + levelsdiff[p]

    # Update dictionary of parameters to optimise with lists of new values; clear dictionary first
    optparams = OrderedDict((key, list()) for key in optparams)
    p = 0
    for key, value in optparams.items():
        for exp in range(0, N):
            if OA[exp, p] == 0:
                optparams[key].append(levels[0, p])
            elif OA[exp, p] == 1:
                optparams[key].append(levels[1, p])
            elif OA[exp, p] == 2:
                optparams[key].append(levels[2, p])
        p += 1

    return optparams, levels, levelsdiff

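A worked first-iteration example (i == 0) for a single hypothetical parameter with initial range [1.0, 10.0] and s = 3, following the branches above:

# levels[1, p]  = (10.0 - 1.0) / 2 + 1.0   -> 5.5   (centre of the initial range)
# levelsdiff[p] = (10.0 - 1.0) / (3 + 1)   -> 2.25  (level spacing)
# levels[0, p]  = 5.5 - 2.25               -> 3.25  (lower level; inside the range, so not clipped)
# levels[2, p]  = 5.5 + 2.25               -> 7.75  (upper level; inside the range, so not clipped)
# On later iterations the spacing shrinks by RR = exp(-(i/18)**2) and the centre moves to the
# previous optimum, so the search contracts around the best level found so far.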

def calculate_optimal_levels(optparams, levels, levelsopt, fitnessvalues, OA, N, k):
    """Calculate optimal levels from results of fitness metric by building a response table.

    Args:
        optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
        levels (array): Lower, central, and upper values for each parameter
        levelsopt (array): Optimal level for each parameter from previous iteration
        fitnessvalues (list): Values from results of fitness metric
        OA (array): Orthogonal array
        N (int): Number of experiments in OA
        k (int): Number of parameters to optimise in OA

    Returns:
        optparams (dict): Ordered dictionary containing name of parameters to optimise and their values
        levelsopt (array): Optimal level for each parameter from previous iteration
    """

    # Build a table of responses based on the results of the fitness metric
    for p in range(0, k):
        responses = np.zeros(3, dtype=floattype)

        cnt1 = 0
        cnt2 = 0
        cnt3 = 0

        for exp in range(1, N):
            if OA[exp, p] == 0:
                responses[0] += fitnessvalues[exp]
                cnt1 += 1
            elif OA[exp, p] == 1:
                responses[1] += fitnessvalues[exp]
                cnt2 += 1
            elif OA[exp, p] == 2:
                responses[2] += fitnessvalues[exp]
                cnt3 += 1

        responses[0] /= cnt1
        responses[1] /= cnt2
        responses[2] /= cnt3

        # Calculate optimal level from table of responses
        tmp = np.where(responses == np.amax(responses))[0]

        # If there is more than one level found use the first
        if len(tmp) > 1:
            tmp = tmp[0]

        levelsopt[p] = tmp

    # Update dictionary of parameters to optimise with lists of new values; clear dictionary first
    optparams = OrderedDict((key, list()) for key in optparams)
    p = 0
    for key, value in optparams.items():
        optparams[key].append(levels[levelsopt[p], p])
        p += 1

    return optparams, levelsopt

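As a small illustration of the response table (values invented for the example): if, for one parameter, the mean fitness at levels 0, 1 and 2 came out as 4.2, 5.0 and 4.8, the argmax picks level 1 as the optimum for the next iteration.

responses = np.array([4.2, 5.0, 4.8])            # illustrative values only
np.where(responses == np.amax(responses))[0]     # -> array([1]), so levelsopt[p] = 1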

def plot_optimisation_history(fitnessvalueshist, optparamshist, optparamsinit):
    """Plot the history of fitness values and each optimised parameter values for the optimisation.

    Args:
        fitnessvalueshist (list): History of fitness values
        optparamshist (dict): Name of parameters to optimise and history of their values
    """

    import matplotlib.pyplot as plt

    # Plot history of fitness values
    fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Fitness value'), num='History of fitness values', figsize=(20, 10), facecolor='w', edgecolor='w')
    iterations = np.arange(1, len(fitnessvalueshist) + 1)
    ax.plot(iterations, fitnessvalueshist, 'r', marker='.', ms=15, lw=1)
    ax.set_xlim(1, len(fitnessvalueshist) + 1)
    ax.grid()

    # Plot history of optimisation parameters
    p = 0
    for key, value in optparamshist.items():
        fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Parameter value'), num='History of ' + key + ' parameter', figsize=(20, 10), facecolor='w', edgecolor='w')
        ax.plot(iterations, optparamshist[key], 'r', marker='.', ms=15, lw=1)
        ax.set_xlim(1, len(value) + 1)
        ax.set_ylim(optparamsinit[p][1][0], optparamsinit[p][1][1])
        ax.grid()
        p += 1
    plt.show()

@@ -0,0 +1,163 @@
# Copyright (C) 2015, Craig Warren
#
# This module is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/.
#
# Please use the attribution at http://dx.doi.org/10.1190/1.3548506

import h5py
import numpy as np
np.seterr(divide='ignore')
from scipy import signal

"""This module contains fitness metric functions that can be used with the Taguchi optimisation method.

All fitness functions must take two arguments and return a single fitness value.
The first argument should be the name of the output file.
The second argument is a list which can contain any number of additional arguments, e.g. names (IDs) of outputs (rxs) from the input file.
"""

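A minimal sketch of a user-defined metric that follows this two-argument contract, mirroring the HDF5 access pattern of fitness_max below. The function name fitness_peak2peak and the args structure shown are assumptions for illustration, not part of the commit.

def fitness_peak2peak(filename, args):
    """Hypothetical example metric: peak-to-peak value of the selected outputs."""
    f = h5py.File(filename, 'r')
    nrx = f.attrs['nrx']
    value = 0
    for rx in range(1, nrx + 1):
        tmp = f['/rxs/rx' + str(rx) + '/']
        if tmp.attrs['Name'] in args['outputs']:
            fieldname = list(tmp.keys())[0]
            value = np.amax(tmp[fieldname]) - np.amin(tmp[fieldname])
    return value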

def fitness_max(filename, args):
    """Maximum value from a response.

    Args:
        filename (str): Name of output file
        args (dict): 'outputs' key with a list of names (IDs) of outputs (rxs) from input file

    Returns:
        maxvalue (float): Maximum value from specific outputs
    """

    f = h5py.File(filename, 'r')
    nrx = f.attrs['nrx']

    for rx in range(1, nrx + 1):
        tmp = f['/rxs/rx' + str(rx) + '/']
        if tmp.attrs['Name'] in args['outputs']:
            fieldname = list(tmp.keys())[0]
            maxvalue = np.amax(tmp[fieldname])

    return maxvalue


def fitness_xcorr(filename, args):
    """Maximum value of a cross-correlation between a response and a reference response.

    Args:
        filename (str): Name of output file
        args (dict): 'refresp' key with path & filename of reference response (time, amp) stored in a text file; 'outputs' key with a list of names (IDs) of outputs (rxs) from input file

    Returns:
        xcorrmax (float): Maximum value from specific outputs
    """

    # Load (from text file) and normalise the reference response
    with open(args['refresp'], 'r') as f:
        refdata = np.loadtxt(f)
    reftime = refdata[:,0] * 1e-9
    refresp = refdata[:,1]
    refresp /= np.amax(np.abs(refresp))

    # Load response from output file
    f = h5py.File(filename, 'r')
    nrx = f.attrs['nrx']
    modeltime = np.arange(0, f.attrs['dt'] * f.attrs['Iterations'], f.attrs['dt'])

    for rx in range(1, nrx + 1):
        tmp = f['/rxs/rx' + str(rx) + '/']
        if tmp.attrs['Name'] in args['outputs']:
            fieldname = list(tmp.keys())[0]
            modelresp = tmp[fieldname]
            # Convert field value (V/m) to voltage
            if fieldname == 'Ex':
                modelresp *= -1 * f.attrs['dx, dy, dz'][0]
            elif fieldname == 'Ey':
                modelresp *= -1 * f.attrs['dx, dy, dz'][1]
            if fieldname == 'Ez':
                modelresp *= -1 * f.attrs['dx, dy, dz'][2]

            # Normalise response from output file
            modelresp /= np.amax(np.abs(modelresp))

    # Make both responses the same length in time
    if reftime[-1] > modeltime[-1]:
        reftime = np.arange(0, f.attrs['dt'] * f.attrs['Iterations'], reftime[-1] / len(reftime))
        refresp = refresp[0:len(reftime)]
    elif modeltime[-1] > reftime[-1]:
        modeltime = np.arange(0, reftime[-1], f.attrs['dt'])
        modelresp = modelresp[0:len(modeltime)]

    # Downsample the response with the higher sampling rate
    if len(modeltime) < len(reftime):
        refresp = signal.resample(refresp, len(modelresp))
    elif len(reftime) < len(modeltime):
        modelresp = signal.resample(modelresp, len(refresp))

    # Plot responses for checking
    # fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Voltage [V]'), figsize=(20, 10), facecolor='w', edgecolor='w')
    # ax.plot(refresp,'r', lw=2, label='refresp')
    # ax.plot(modelresp,'b', lw=2, label='modelresp')
    # ax.grid()
    # plt.show()

    # Calculate cross-correlation
    xcorr = signal.correlate(refresp, modelresp)
    # Plot cross-correlation for checking
    # fig, ax = plt.subplots(subplot_kw=dict(xlabel='Iterations', ylabel='Voltage [V]'), figsize=(20, 10), facecolor='w', edgecolor='w')
    # ax.plot(xcorr,'r', lw=2, label='xcorr')
    # ax.grid()
    # plt.show()
    xcorrmax = np.amax(xcorr) / 100

    return xcorrmax


def fitness_diffs(filename, args):
    """Sum of the differences (in dB) between responses and a reference response.

    Args:
        filename (str): Name of output file
        args (dict): 'refresp' key with path & filename of reference response; 'outputs' key with a list of names (IDs) of outputs (rxs) from input file

    Returns:
        diffdB (float): Sum of the differences (in dB) between responses and a reference response
    """

    # Load (from gprMax output file) the reference response
    f = h5py.File(args['refresp'], 'r')
    tmp = f['/rxs/rx1/']
    fieldname = list(tmp.keys())[0]
    refresp = np.array(tmp[fieldname])

    # Load (from gprMax output file) the response
    f = h5py.File(filename, 'r')
    nrx = f.attrs['nrx']

    diffdB = 0
    outputs = 0
    for rx in range(1, nrx + 1):
        tmp = f['/rxs/rx' + str(rx) + '/']
        if tmp.attrs['Name'] in args['outputs']:
            fieldname = list(tmp.keys())[0]
            modelresp = np.array(tmp[fieldname])
            # Calculate sum of differences
            tmp = 20 * np.log10(np.abs(modelresp - refresp) / np.amax(np.abs(refresp)))
            tmp = np.abs(np.sum(tmp[-np.isneginf(tmp)])) / len(tmp[-np.isneginf(tmp)])
            diffdB += tmp
            outputs += 1

    return diffdB / outputs
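
In equation form, for each selected output the code above averages the magnitude of the per-sample difference in dB (samples where model and reference coincide give -inf and are excluded), and the returned fitness is the mean over the selected outputs:

\[ \mathrm{diff_{dB}} = \frac{1}{N_\mathrm{out}} \sum_{\mathrm{outputs}} \frac{1}{M} \left| \sum_{m\,\mathrm{finite}} 20 \log_{10} \frac{|x_m - r_m|}{\max_j |r_j|} \right| \]

where x is the model response, r the reference response, and M the number of finite samples.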