add hybrid Differential Evolution algorithm

This commit is contained in:
majsylw
2021-07-09 19:44:18 +02:00
Parent b7e997046c
Commit d7ab02cb7a
2 changed files with 203 additions and 129 deletions

View file

@@ -52,9 +52,10 @@ class Relaxation(object):
                   (not neglected as default: True).
     :type save: bool, optional, default:True
     :param optimizer: chosen optimization method:
-                      Particle Swarm, Genetic or Dual Annealing
-                      (Default: Particle_swarm).
-    :type optimizer: Optimizer class, optional, default:Particle_swarm
+                      Hybrid Particle Swarm-Damped Least-Squares,
+                      Genetic or Dual Annealing (DA)
+                      (Default: PSO_DLS).
+    :type optimizer: Optimizer class, optional
     :param optimizer_options: Additional keyword arguments passed to
                               optimizer class (Default: empty dict).
     :type optimizer_options: dict, optional, default: empty dict
@@ -64,7 +65,7 @@ class Relaxation(object):
                  material_name, f_n=50,
                  number_of_debye_poles=-1,
                  plot=True, save=True,
-                 optimizer=Particle_swarm,
+                 optimizer=PSO_DLS,
                  optimizer_options={}):
         self.name = 'Relaxation function'
         self.params = {}
@@ -139,13 +140,13 @@ class Relaxation(object):
         """ Calling the main optimisation module with defined lower and upper boundaries of search.
 
         Returns:
-            xpm (ndarray): The logarithm with base 10 of relaxation times of the Debyes poles.
-            mx (ndarray): Resulting optimised weights for the given relaxation times.
+            tau (ndarray): The optimised relaxation times.
+            weights (ndarray): Resulting optimised weights for the given relaxation times.
             ee (float): Average error between the actual and the approximated real part.
-            rp (ndarray): The real part of the permittivity for the optimised relaxation
-                          times and weights for the frequnecies included in freq.
-            ip (ndarray): The imaginary part of the permittivity for the optimised
-                          relaxation times and weights for the frequnecies included in freq.
+            rl (ndarray): Real parts of chosen relaxation function
+                          for given frequency points.
+            im (ndarray): Imaginary parts of chosen relaxation function
+                          for given frequency points.
         """
         # Define the lower and upper boundaries of search
         lb = np.full(self.number_of_debye_poles,
@@ -153,14 +154,13 @@ class Relaxation(object):
         ub = np.full(self.number_of_debye_poles,
                      -np.log10(np.min(self.freq)) + 3)
         # Call optimizer to minimize the cost function
-        xmp, _ = self.optimizer.fit(func=cost_function,
+        tau, weights, ee, rl, im = self.optimizer.fit(func=self.optimizer.cost_function,
                                     lb=lb, ub=ub,
-                                    funckwargs={'rl_g': self.rl,
-                                                'im_g': self.im,
-                                                'freq_g': self.freq}
+                                    funckwargs={'rl': self.rl,
+                                                'im': self.im,
+                                                'freq': self.freq}
                                     )
-        _, _, mx, ee, rp, ip = DLS(self.rl, self.im, xmp, self.freq)
-        return xmp, mx, ee, rp, ip
+        return tau, weights, ee, rl, im
 
     def run(self):
         """ Solve the problem described by the given relaxation function
@@ -182,6 +182,10 @@ class Relaxation(object):
         xmp, mx, ee, rp, ip = self.optimize()
         # Print the results in gprMax format style
         properties = self.print_output(xmp, mx, ee)
+        err_real, err_imag = self.error(rp + ee, ip)
+        print(f'The average fractional error for:\n'
+              f'- real part: {err_real}\n'
+              f'- imaginary part: {err_imag}\n')
         if self.save:
             self.save_result(properties)
         # Plot the actual and the approximate dielectric properties
@@ -265,6 +269,25 @@ class Relaxation(object):
         ax.set_ylabel("Approximation error (%)")
         plt.show()
 
+    def error(self, rl_exp, im_exp):
+        """ Calculate the average fractional error separately for
+            relative permittivity (real part) and conductivity (imaginary part)
+
+        Args:
+            rl_exp (ndarray): Real parts of optimised Debye expansion
+                              for given frequency points (plus average error).
+            im_exp (ndarray): Imaginary parts of optimised Debye expansion
+                              for given frequency points.
+
+        Returns:
+            avg_err_real (float): average fractional error
+                                  for relative permittivity (real part)
+            avg_err_imag (float): average fractional error
+                                  for conductivity (imaginary part)
+        """
+        avg_err_real = np.sum(np.abs((rl_exp - self.rl)/self.rl) * 100)/len(rl_exp)
+        avg_err_imag = np.sum(np.abs((im_exp - self.im)/self.im) * 100)/len(im_exp)
+        return avg_err_real, avg_err_imag
+
     @staticmethod
     def save_result(output, fdir="../materials"):
         """ Save the resulting Debye parameters in a gprMax format.
@@ -328,7 +351,7 @@ class HavriliakNegami(Relaxation):
                  sigma, mu, mu_sigma, material_name,
                  number_of_debye_poles=-1, f_n=50,
                  plot=False, save=True,
-                 optimizer=Particle_swarm,
+                 optimizer=PSO_DLS,
                  optimizer_options={}):
         super(HavriliakNegami, self).__init__(sigma=sigma, mu=mu, mu_sigma=mu_sigma,
                                               material_name=material_name, f_n=f_n,
@@ -398,7 +421,7 @@ class Jonscher(Relaxation):
                  sigma, mu, mu_sigma,
                  material_name, number_of_debye_poles=-1,
                  f_n=50, plot=False, save=True,
-                 optimizer=Particle_swarm,
+                 optimizer=PSO_DLS,
                  optimizer_options={}):
         super(Jonscher, self).__init__(sigma=sigma, mu=mu, mu_sigma=mu_sigma,
                                        material_name=material_name, f_n=f_n,
@@ -431,7 +454,7 @@ class Jonscher(Relaxation):
         if self.n_p > 1:
             sys.exit("n_p value must range between 0-1 (0 <= n_p <= 1)")
         if self.f_min == self.f_max:
-            sys.exit("Error: Null frequency range")
+            sys.exit("Error: Null frequency range!")
 
     def calculation(self):
         """Calculates the Q function for the given parameters"""
@@ -462,7 +485,7 @@ class Crim(Relaxation):
                  materials, sigma, mu, mu_sigma, material_name,
                  number_of_debye_poles=-1, f_n=50,
                  plot=False, save=True,
-                 optimizer=Particle_swarm,
+                 optimizer=PSO_DLS,
                  optimizer_options={}):
         super(Crim, self).__init__(sigma=sigma, mu=mu, mu_sigma=mu_sigma,
@@ -553,7 +576,7 @@ class Rawdata(Relaxation):
                  material_name, number_of_debye_poles=-1,
                  f_n=50, delimiter =',',
                  plot=False, save=True,
-                 optimizer=Particle_swarm,
+                 optimizer=PSO_DLS,
                  optimizer_options={}):
         super(Rawdata, self).__init__(sigma=sigma, mu=mu, mu_sigma=mu_sigma,
@@ -619,10 +642,20 @@ if __name__ == "__main__":
                             material_name="Kelley",
                             number_of_debye_poles=5, f_n=100,
                             plot=True, save=False,
-                            optimizer=Dual_annealing,
+                            optimizer=DA,
                             optimizer_options={'seed': 111})
     setup.run()
-    ### Testing setup
+    setup = HavriliakNegami(f_min=1e7, f_max=1e11,
+                            alpha=1-0.09, beta=0.45,
+                            e_inf=2.7, de=8.6-2.7, tau_0=9.4e-10,
+                            sigma=0, mu=0, mu_sigma=0,
+                            material_name="Kelley",
+                            number_of_debye_poles=5, f_n=100,
+                            plot=True, save=False,
+                            optimizer=DE,
+                            optimizer_options={'seed': 111})
+    setup.run()
+    '''### Testing setup
     setup = Rawdata("Test.txt", 0.1, 1, 0.1, "M1", 3, plot=True,
                     optimizer_options={'seed': 111,
                                        'pflag': True})
@@ -640,4 +673,4 @@ if __name__ == "__main__":
     materials = np.array([material1, material2])
     setup = Crim(1*1e-1, 1e-9, 0.5, f, materials, 0.1,
                  1, 0, "M4", 2, plot=True)
-    setup.run()
+    setup.run()'''

View file

@@ -24,7 +24,7 @@ from tqdm import tqdm
 class Optimizer(object):
     """
-    Create particle swarm optimisation object.
+    Create choosen optimisation object.
 
     :param maxiter: The maximum number of iterations for the
                     optimizer (Default: 1000).
@@ -36,14 +36,57 @@ class Optimizer(object):
     def __init__(self, maxiter=1000, seed=None):
         self.maxiter = maxiter
         self.seed = seed
+        self.calc_weights = DLS
 
-    def fit(self):
-        """
-        Call the optimization function that tries to find an optimal set
+    def fit(self, func, lb, ub, funckwargs={}):
+        """Call the optimization function that tries to find an optimal set
         of relaxation times that minimise the error
-        between the actual and the approximated electric permittivity.
+        between the actual and the approximated electric permittivity
+        and calculate optimised weights for the given relaxation times.
+
+        Args:
+            func (function): The function to be minimized.
+            lb (ndarray): The lower bounds of the design variable(s).
+            ub (ndarray): The upper bounds of the design variable(s).
+            funckwargs (dict): Additional keyword arguments passed to
+                               objective and constraint function
+                               (Default: empty dict).
+
+        Returns:
+            tau (ndarray): The the best relaxation times.
+            weights (ndarray): Resulting optimised weights for the given relaxation times.
+            ee (float): Average error between the actual and the approximated real part.
+            rl (ndarray): Real parts of chosen relaxation function
+                          for given frequency points.
+            im (ndarray): Imaginary parts of chosen relaxation function
+                          for given frequency points.
         """
-        raise NotImplementedError()
+        np.random.seed(self.seed)
+        # find the relaxation frequencies using Differential Evolution
+        tau, _ = self.calc_relaxation_times(func, lb, ub, funckwargs)
+        # find the weights using a damped least squares
+        _, _, weights, ee, rl_exp, im_exp = self.calc_weights(tau, **funckwargs)
+        return tau, weights, ee, rl_exp, im_exp
+
+    @staticmethod
+    def cost_function(x, rl, im, freq):
+        """
+        The cost function is the average error between
+        the actual and the approximated electric permittivity.
+
+        Args:
+            x (ndarray): The logarithm with base 10 of relaxation times of the Debyes poles.
+            rl (ndarray): Real parts of chosen relaxation function
+                          for given frequency points.
+            im (ndarray): Imaginary parts of chosen relaxation function
+                          for given frequency points.
+            freq (ndarray): The frequencies vector for defined grid.
+
+        Returns:
+            cost (float): Sum of mean absolute errors for real and imaginary parts.
+        """
+        cost_i, cost_r, _, _, _, _ = DLS(x, rl, im, freq)
+        return cost_i + cost_r
 
     @staticmethod
     def plot(x, y):
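With this hunk the base Optimizer fixes the two-stage fit() and only delegates the global stage to calc_relaxation_times(), while cost_function() is shared and simply evaluates DLS. A condensed sketch of that template-method layout, with a hypothetical random-search subclass standing in for PSO_DLS, DA or DE (the DLS stage is stubbed out; nothing here is quoted from the commit):

import numpy as np

class SketchOptimizer:
    # base class fixes the two-stage flow; subclasses supply the global search
    def __init__(self, seed=None):
        self.seed = seed

    def fit(self, func, lb, ub, funckwargs={}):
        np.random.seed(self.seed)
        logt, best_cost = self.calc_relaxation_times(func, lb, ub, funckwargs)
        # a full implementation would now call DLS(logt, **funckwargs) for the weights
        return logt, best_cost

    def calc_relaxation_times(self, func, lb, ub, funckwargs):
        raise NotImplementedError()

class RandomSearch(SketchOptimizer):
    # stand-in global optimizer: sample candidates and keep the cheapest one
    def calc_relaxation_times(self, func, lb, ub, funckwargs, n=200):
        candidates = np.random.uniform(lb, ub, size=(n, len(lb)))
        costs = [func(c, **funckwargs) for c in candidates]
        best = int(np.argmin(costs))
        return candidates[best], costs[best]

In the commit itself this slot is filled by the particle swarm loop, scipy.optimize.dual_annealing and scipy.optimize.differential_evolution respectively.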
@@ -66,9 +109,9 @@ class Optimizer(object):
         plt.pause(0.0001)
 
-class Particle_swarm(Optimizer):
-    """
-    Create particle swarm optimisation object with predefined parameters.
+class PSO_DLS(Optimizer):
+    """ Create hybrid Particle Swarm-Damped Least Squares optimisation
+        object with predefined parameters.
 
     :param swarmsize: The number of particles in the swarm (Default: 40).
     :type swarmsize: int, optional
@@ -98,7 +141,7 @@ class Particle_swarm(Optimizer):
                  minstep=1e-8, minfun=1e-8,
                  pflag=False, seed=None):
-        super(Particle_swarm, self).__init__(maxiter, seed)
+        super(PSO_DLS, self).__init__(maxiter, seed)
         self.swarmsize = swarmsize
         self.omega = omega
         self.phip = phip
@@ -107,7 +150,7 @@ class Particle_swarm(Optimizer):
         self.minfun = minfun
         self.pflag = pflag
 
-    def fit(self, func, lb, ub, funckwargs={}):
+    def calc_relaxation_times(self, func, lb, ub, funckwargs={}):
         """
         A particle swarm optimisation that tries to find an optimal set
         of relaxation times that minimise the error
@@ -124,7 +167,7 @@ class Particle_swarm(Optimizer):
                                (Default: empty dict).
 
         Returns:
-            g (ndarray): The swarm's best known position (optimal design).
+            g (ndarray): The swarm's best known position (relaxation times).
             fg (float): The objective value at ``g``.
         """
         np.random.seed(self.seed)
@@ -217,75 +260,23 @@ class Particle_swarm(Optimizer):
                 else:
                     xpp.append(it)
                     ypp.append(fg)
-                Particle_swarm.plot(xpp, ypp)
+                PSO_DLS.plot(xpp, ypp)
         return g, fg
 
-class Dual_annealing(Optimizer):
-    """
-    Create dual annealing object with predefined parameters.
-
-    :param maxiter: The maximum number of iterations for the swarm
-                    to search (Default: 1000).
-    :type maxiter: int, optional
-    :param local_search_options: Extra keyword arguments to be passed
-                                 to the local minimizer, reffer to
-                                 scipy.optimize.minimize() function
-                                 (Default: empty dict).
-    :type local_search_options: dict, optional
-    :param initial_temp (float): The initial temperature, use higher values to
-                                 facilitates a wider search of the energy
-                                 landscape, allowing dual_annealing to escape
-                                 local minima that it is trapped in.
-                                 Range is (0.01, 5.e4] (Default: 5230).
-    :type initial_temp: float, optional
-    :param restart_temp_ratio: During the annealing process,
-                               temperature is decreasing, when it
-                               reaches initial_temp * restart_temp_ratio,
-                               the reannealing process is triggered.
-                               Range is (0, 1) (Default: 2e-5).
-    :type restart_temp_ratio: float, optional
-    :param visit: Parameter for visiting distribution. The value range is (1, 3]
-                  (Default: 2.62).
-    :type visit: float, optional
-    :param accept: Parameter for acceptance distribution. It is used to control
-                   the probability of acceptance. The lower the acceptance parameter,
-                   the smaller the probability of acceptance. The value range (-1e4, -5]
-                   (Default: -5.0).
-    :type accept: float, optional
-    :param no_local_search (bool): If no_local_search is set to True, a traditional
-                                   Generalized Simulated Annealing will be performed
-                                   with no local search strategy applied (Default: False).
-    :type no_local_search: bool, optional
-    :param maxfun: Soft limit for the number of objective function calls.
-                   (Default: 1e7).
-    :type maxfun: int, optional
-    :param callback: A callback function with signature callback(x, f, context),
-                     which will be called for all minima found.
-                     x and f are the coordinates and function value of
-                     the latest minimum found, and context has value in [0, 1, 2],
-                     with the following meaning:
-                     0: minimum detected in the annealing process.
-                     1: detection occurred in the local search process.
-                     2: detection done in the dual annealing process.
-                     If the callback implementation returns True,
-                     the algorithm will stop.
-    :type callback: None, callable, optional
-    :param x0: Coordinates of a single N-D starting point, shape(n,).
-               (Default: None).
-    :type x0: None, ndarray, optional
-    :param seed: Specify seed for repeatable minimizations.
-                 The random numbers generated with this seed only
-                 affect the visiting distribution function and
-                 new coordinates generation (Default: None).
-    :type seed: None, int, optional
+class DA(Optimizer):
+    """ Create Dual Annealing object with predefined parameters.
+        The current class is a modified edition of the scipy.optimize
+        package which can be found at:
+        https://docs.scipy.org/doc/scipy/reference/generated/
+        scipy.optimize.dual_annealing.html#scipy.optimize.dual_annealing
     """
     def __init__(self, maxiter=1000,
                  local_search_options={}, initial_temp=5230.0,
                  restart_temp_ratio=2e-05, visit=2.62, accept=-5.0,
                  maxfun=1e7, no_local_search=False,
                  callback=None, x0=None, seed=None):
-        super(Dual_annealing, self).__init__(maxiter, seed)
+        super(DA, self).__init__(maxiter, seed)
         self.local_search_options = local_search_options
         self.initial_temp = initial_temp
         self.restart_temp_ratio = restart_temp_ratio
@@ -296,7 +287,7 @@ class Dual_annealing(Optimizer):
         self.callback = callback
         self.x0 = x0
 
-    def fit(self, func, lb, ub, funckwargs={}):
+    def calc_relaxation_times(self, func, lb, ub, funckwargs={}):
         """
         Find the global minimum of a function using Dual Annealing.
         The current class is a modified edition of the scipy.optimize
@@ -306,15 +297,15 @@ class Dual_annealing(Optimizer):
         Args:
             func (function): The function to be minimized
-            lb (array): The lower bounds of the design variable(s)
-            ub (array): The upper bounds of the design variable(s)
+            lb (ndarray): The lower bounds of the design variable(s)
+            ub (ndarray): The upper bounds of the design variable(s)
             funckwargs (dict): Additional keyword arguments passed to
                                objective and constraint function
                                (Default: empty dict)
 
         Returns:
-            g (array): The solution array (optimal design).
-            fg (float): The objective value at the solution.
+            x (ndarray): The solution array (relaxation times).
+            fun (float): The objective value at the best solution.
         """
         np.random.seed(self.seed)
         result = scipy.optimize.dual_annealing(func,
@@ -330,22 +321,92 @@ class Dual_annealing(Optimizer):
                                                 no_local_search=self.no_local_search,
                                                 callback=self.callback,
                                                 x0=self.x0)
+        print(result.message)
         return result.x, result.fun
 
-def DLS(rl, im, logt, freq):
+class DE(Optimizer):
+    """
+    Create Differential Evolution-Damped Least Squares object with predefined parameters.
+    The current class is a modified edition of the scipy.optimize
+    package which can be found at:
+    https://docs.scipy.org/doc/scipy/reference/generated/
+    scipy.optimize.differential_evolution.html#scipy.optimize.differential_evolution
+    """
+    def __init__(self, maxiter=1000,
+                 strategy='best1bin', popsize=15, tol=0.01, mutation=(0.5, 1),
+                 recombination=0.7, callback=None, disp=False, polish=True,
+                 init='latinhypercube', atol=0, updating='immediate', workers=1,
+                 constraints=(), seed=None):
+        super(DE, self).__init__(maxiter, seed)
+        self.strategy = strategy
+        self.popsize = popsize
+        self.tol = tol
+        self.mutation = mutation
+        self.recombination = recombination
+        self.callback = callback
+        self.disp = disp
+        self.polish = polish
+        self.init= init
+        self.atol = atol
+        self.updating = updating
+        self.workers = workers
+        self.constraints = constraints
+
+    def calc_relaxation_times(self, func, lb, ub, funckwargs={}):
+        """
+        Find the global minimum of a function using Differential Evolution.
+        The current class is a modified edition of the scipy.optimize
+        package which can be found at:
+        https://docs.scipy.org/doc/scipy/reference/generated/
+        scipy.optimize.differential_evolution.html#scipy.optimize.differential_evolution
+
+        Args:
+            func (function): The function to be minimized
+            lb (ndarray): The lower bounds of the design variable(s)
+            ub (ndarray): The upper bounds of the design variable(s)
+            funckwargs (dict): Additional keyword arguments passed to
+                               objective and constraint function
+                               (Default: empty dict)
+
+        Returns:
+            x (ndarray): The solution array (relaxation times).
+            fun (float): The objective value at the best solution.
+        """
+        np.random.seed(self.seed)
+        result = scipy.optimize.differential_evolution(func,
+                                                       bounds=list(zip(lb, ub)),
+                                                       args=funckwargs.values(),
+                                                       strategy=self.strategy,
+                                                       popsize=self.popsize,
+                                                       tol=self.tol,
+                                                       mutation=self.mutation,
+                                                       recombination=self.recombination,
+                                                       callback=self.callback,
+                                                       disp=self.disp,
+                                                       polish=self.polish,
+                                                       init=self.init,
+                                                       atol=self.atol,
+                                                       updating=self.updating,
+                                                       workers=self.workers,
+                                                       constraints=self.constraints)
+        print(result.message)
+        return result.x, result.fun
+
+def DLS(logt, rl, im, freq):
     """
     Find the weights using a non-linear least squares (LS) method,
     the Levenberg–Marquardt algorithm (LMA or just LM),
     also known as the damped least-squares (DLS) method.
 
     Args:
+        logt (ndarray): The best known position form optimization module (optimal design),
+                        the logarithm with base 10 of relaxation times of the Debyes poles.
         rl (ndarray): Real parts of chosen relaxation function
                       for given frequency points.
         im (ndarray): Imaginary parts of chosen relaxation function
                       for given frequency points.
-        logt (ndarray): The best known position form optimization module (optimal design),
-                        the logarithm with base 10 of relaxation times of the Debyes poles.
         freq (ndarray): The frequencies vector for defined grid.
 
     Returns:
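The new DE.calc_relaxation_times() is essentially a pass-through to scipy.optimize.differential_evolution. A hedged sketch of that underlying call with a trivial stand-in cost function (the keyword values mirror the class defaults; the toy cost and arrays are illustrative only, not from the commit):

import numpy as np
import scipy.optimize

def toy_cost(logt, rl, im, freq):
    # stand-in for Optimizer.cost_function(x, rl, im, freq)
    return float(np.sum(logt**2))

lb = np.full(3, -11.0)
ub = np.full(3, -7.0)
kwargs = {'rl': np.ones(5), 'im': np.ones(5), 'freq': np.logspace(7, 9, 5)}

result = scipy.optimize.differential_evolution(
    toy_cost,
    bounds=list(zip(lb, ub)),
    args=tuple(kwargs.values()),      # positional order must match (rl, im, freq)
    strategy='best1bin', popsize=15, tol=0.01,
    mutation=(0.5, 1), recombination=0.7,
    polish=True, init='latinhypercube',
    updating='immediate', workers=1, seed=111)
print(result.message, result.x, result.fun)

One detail worth noting is that args=funckwargs.values() in the commit relies on the dict's insertion order matching the positional order (rl, im, freq) that cost_function expects.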
@@ -364,36 +425,16 @@ def DLS(rl, im, logt, freq):
     # logt=log10(t0) for efficiency during the optimisation
     # Here they are transformed back t0=10**logt
     tt = 10**logt
-    # y = Ax , here the A matrix for the real and the imaginary part is builded
+    # y = Ax, here the A matrix for the real and the imaginary part is builded
     d = 1 / (1 + 1j * 2 * np.pi * np.repeat(
         freq, len(tt)).reshape((-1, len(tt))) * tt)
     # Adding dumping (Levenberg–Marquardt algorithm)
     # Solving the overdetermined system y=Ax
-    x = np.abs(np.linalg.lstsq(d.imag, im, rcond=-1)[0]) # absolute damped least-squares solution
+    x = np.abs(np.linalg.lstsq(d.imag, im, rcond=None)[0]) # absolute damped least-squares solution
     rp, ip = np.matmul(d.real, x[np.newaxis].T).T[0], np.matmul(d.imag, x[np.newaxis].T).T[0]
     cost_i = np.sum(np.abs(ip-im))/len(im)
     ee = np.mean(rl - rp)
     if ee < 1:
         ee = 1
-    cost_r = np.sum(np.abs(rp - rl + ee))/len(im)
+    cost_r = np.sum(np.abs(rp + ee - rl))/len(im)
     return cost_i, cost_r, x, ee, rp, ip
-
-def cost_function(x, rl_g, im_g, freq_g):
-    """
-    The cost function is the average error between
-    the actual and the approximated electric permittivity.
-
-    Args:
-        x (ndarray): The logarithm with base 10 of relaxation times of the Debyes poles.
-        rl_g (ndarray): Real parts of chosen relaxation function
-                        for given frequency points.
-        im_g (ndarray): Imaginary parts of chosen relaxation function
-                        for given frequency points.
-        freq (ndarray): The frequencies vector for defined grid.
-
-    Returns:
-        cost (float): Sum of mean absolute errors for real and imaginary part.
-    """
-    cost_i, cost_r, _, _, _, _ = DLS(rl_g, im_g, x, freq_g)
-    return cost_i + cost_r
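For the d matrix built a few lines above (the d = 1 / (1 + 1j * 2 * np.pi * np.repeat(...)) expression), a small shape check with hypothetical sizes shows the layout the least-squares step relies on:

import numpy as np

freq = np.array([1e8, 1e9, 1e10])            # 3 hypothetical frequency points
logt = np.array([-9.5, -8.0])                # 2 hypothetical log10 relaxation times
tt = 10**logt
d = 1 / (1 + 1j * 2 * np.pi * np.repeat(
    freq, len(tt)).reshape((-1, len(tt))) * tt)
print(d.shape)                               # (3, 2): one row per frequency, one column per Debye pole
# the weights x then solve the overdetermined system d.imag @ x = im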