iterations = 0
offset = 0.0001 # small offset to keep the upper bound for gamma strictly below min(data); gamma equal to min(data) would give an infinite log-likelihood
success = False # flag flipped to True once the optimizer converges
while success is False:
iterations += 1
if iterations == 1:
# get a quick initial guess using the minimum of the data
if min(all_data) <= np.e:
self.gamma = 0
else:
self.gamma = np.log(min(all_data))
gamma_initial_guess = self.gamma
else:
# get a better guess for gamma by optimizing the LL of a shifted distribution. This will only be run if the first attempt didn't work
gamma_initial_guess = min(all_data) - offset
bnds1 = [(0, min(all_data) - offset)] # bounds on the solution. Helps a lot with stability
gamma_res = minimize(Fit_Lognormal_3P.gamma_optimizer, gamma_initial_guess, args=(failures, right_censored), method='L-BFGS-B', bounds=bnds1)
self.gamma = gamma_res.x[0]
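# gamma_optimizer is not shown in this snippet; presumably it returns the negative log-likelihood of the gamma-shifted 2P lognormal for a candidate gamma, so L-BFGS-B searches for the best threshold within (0, min(all_data) - offset)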
# obtain the initial guess for mu and sigma
data_shifted = all_data - self.gamma
sp = ss.lognorm.fit(data_shifted, floc=0, optimizer='powell') # scipy's fit is used as an initial guess; it is only correct when there is no censored data
guess = [np.log(sp[2]), sp[0], self.gamma]
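# scipy returns lognorm parameters as (s, loc, scale); with floc=0 the lognormal mu is ln(scale) = ln(sp[2]) and sigma is the shape parameter s = sp[0]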
self.initial_guess = guess
k = len(guess)
n = len(all_data)
delta_BIC = 1
BIC_array = [1000000]
runs = 0
gamma_lower_bound = 0.85 * gamma_initial_guess # 0.85 was found empirically to minimise the error without causing autograd to fail
bnds2 = [(-10, None), (0, None), (gamma_lower_bound, min(all_data) - offset)] # bounds on the solution. Helps a lot with stability
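# the order of bnds2 matches guess = [mu, sigma, gamma]: mu is bounded below at -10, sigma must stay positive, and gamma is confined between 0.85 * its initial guess and just below min(all_data)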
while delta_BIC > 0.001 and runs < 5: # exits after BIC convergence or 5 iterations
runs += 1
result = minimize(value_and_grad(Fit_Lognormal_3P.LL), guess, args=(failures, right_censored), jac=True, method='L-BFGS-B', bounds=bnds2)
params = result.x
guess = [params[0], params[1], params[2]]
LL2 = 2 * Fit_Lognormal_3P.LL(guess, failures, right_censored)
BIC_array.append(np.log(n) * k + LL2)
delta_BIC = abs(BIC_array[-1] - BIC_array[-2])
success = result.success
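# if L-BFGS-B converged, keep the MLE for (mu, sigma, gamma); otherwise fall back to scipy's uncensored fit below, which is only reliable when there is no censored data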
if result.success is True:
params = result.x
self.success = True
self.mu = params[0]
self.sigma = params[1]
self.gamma = params[2]
else:
self.success = False
print('WARNING: Fitting using Autograd FAILED for Lognormal_3P. The fit from Scipy was used instead so the results may not be accurate.')
sp = ss.lognorm.fit(all_data, optimizer='powell')
self.mu = np.log(sp[2])
self.sigma = sp[0]
self.gamma = sp[1]
params = [self.mu, self.sigma, self.gamma]
self.loglik2 = LL2
if n - k - 1 > 0:
self.AICc = 2 * k + LL2 + (2 * k ** 2 + 2 * k) / (n - k - 1)
else:
self.AICc = 'Insufficient data'
self.BIC = np.log(n) * k + LL2
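# with LL2 = -2*ln(L_hat): AICc = 2k - 2*ln(L_hat) + (2k^2 + 2k)/(n - k - 1) and BIC = k*ln(n) - 2*ln(L_hat), where k = 3 parameters and n = total number of observations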
self.distribution = Lognormal_Distribution(mu=self.mu, sigma=self.sigma, gamma=self.gamma)
# confidence interval estimates of parameters
Z = -ss.norm.ppf((1 - CI) / 2)
hessian_matrix = hessian(Fit_Lognormal_3P.LL)(np.array(tuple(params)), np.array(tuple(failures)), np.array(tuple(right_censored)))
covariance_matrix = np.linalg.inv(hessian_matrix)
self.mu_SE = abs(covariance_matrix[0][0]) ** 0.5
self.sigma_SE = abs(covariance_matrix[1][1]) ** 0.5
self.gamma_SE = abs(covariance_matrix[2][2]) ** 0.5
self.mu_upper = self.mu + (Z * self.mu_SE) # Mu can be positive or negative.
self.mu_lower = self.mu + (-Z * self.mu_SE)
self.sigma_upper = self.sigma * (np.exp(Z * (self.sigma_SE / self.sigma))) # sigma is strictly positive
self.sigma_lower = self.sigma * (np.exp(-Z * (self.sigma_SE / self.sigma)))
self.gamma_upper = self.gamma * (np.exp(Z * (self.gamma_SE / self.gamma))) # gamma is treated as strictly positive because the optimizer bounds force it to be; Minitab allows gamma to be negative, so its bounds differ
self.gamma_lower = self.gamma * (np.exp(-Z * (self.gamma_SE / self.gamma)))
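# the standard errors come from the inverse of the observed Fisher information (the Hessian of the negative log-likelihood); mu gets a symmetric normal-approximation interval, while sigma and gamma get intervals built on the log scale, theta * exp(+/- Z * SE / theta), which keeps the bounds strictly positive. For CI = 0.95, Z = -ss.norm.ppf(0.025) ≈ 1.96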
Data = {'Parameter': ['Mu', 'Sigma', 'Gamma'],
'Point Estimate': [self.mu, self.sigma, self.gamma],
'Standard Error': [self.mu_SE, self.sigma_SE, self.gamma_SE],
'Lower CI': [self.mu_lower, self.sigma_lower, self.gamma_lower],
'Upper CI': [self.mu_upper, self.sigma_upper, self.gamma_upper]}
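# Data collects the point estimates, standard errors, and confidence bounds for each parameter; presumably (not shown in this snippet) it is then rendered as a results table, e.g.:
#   df = pd.DataFrame(Data, columns=['Parameter', 'Point Estimate', 'Standard Error', 'Lower CI', 'Upper CI'])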
def LL(params, T_f, T_rc): # log likelihood function (3 parameter Lognormal)
LL_f = 0
LL_rc = 0
LL_f += Fit_Lognormal_3P.logf(T_f, params[0], params[1], params[2]).sum() # failure times
LL_rc += Fit_Lognormal_3P.logR(T_rc, params[0], params[1], params[2]).sum() # right censored times
return -(LL_f + LL_rc)
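# LL sums the log-PDF over the observed failures and the log-survival-function over the right-censored times, and returns the negative so it can be passed to minimize(). The logf and logR helpers are not included in this snippet; a minimal sketch consistent with the gamma-shifted (3 parameter) lognormal, written with autograd.numpy (imported at module level as anp, alongside erf from autograd.scipy.special) so that value_and_grad and hessian can differentiate through it, could look like this (an assumption, not necessarily the library's exact implementation):
def logf(t, mu, sigma, gamma):  # log PDF of the lognormal shifted by the threshold gamma
    return anp.log(anp.exp(-0.5 * (((anp.log(t - gamma) - mu) / sigma) ** 2)) / ((t - gamma) * sigma * (2 * anp.pi) ** 0.5))
def logR(t, mu, sigma, gamma):  # log survival function of the shifted lognormal: ln(1 - Phi((ln(t - gamma) - mu) / sigma))
    return anp.log(0.5 - 0.5 * erf((anp.log(t - gamma) - mu) / (sigma * 2 ** 0.5)))
# The lines below appear to come from a separate probability-plotting routine (the 'Fitted Lognormal_2P' / 'Fitted Lognormal_3P' labels and plt calls suggest a probability plot helper)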
label = str('Fitted Lognormal_2P (μ=' + str(round_to_decimals(mu, dec)) + ', σ=' + str(round_to_decimals(sigma, dec)) + ')')
if 'color' in kwargs:
color = kwargs.pop('color')
data_color = color
else:
color = 'red'
data_color = 'k'
plt.xlabel('Time')
elif fit_gamma is True:
if __fitted_dist_params is not None:
mu = __fitted_dist_params.mu
sigma = __fitted_dist_params.sigma
gamma = __fitted_dist_params.gamma
else:
from reliability.Fitters import Fit_Lognormal_3P
fit = Fit_Lognormal_3P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
mu = fit.mu
sigma = fit.sigma
gamma = fit.gamma
lnf = Lognormal_Distribution(mu=mu, sigma=sigma).CDF(show_plot=False, xvals=xvals)
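# because the data are shifted by gamma further below, the fitted 3P model is drawn as a 2P lognormal CDF on the shifted time axis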
if 'label' in kwargs:
label = kwargs.pop('label')
else:
label = str('Fitted Lognormal_3P (μ=' + str(round_to_decimals(mu, dec)) + ', σ=' + str(round_to_decimals(sigma, dec)) + ', γ=' + str(round_to_decimals(gamma, dec)) + ')')
if 'color' in kwargs:
color = kwargs.pop('color')
data_color = color
else:
color = 'red'
data_color = 'k'
plt.xlabel('Time - gamma')
failures = failures - gamma
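# plotting is done against 'Time - gamma', so the failure times are shifted by the fitted gamma to line up with the 2P CDF computed above; any right-censored times would presumably need the same shift (not shown in this snippet)
# The lines below appear to come from a different routine (a model-comparison helper in the style of Fit_Everything) that fits several candidate distributions and stores their parameters, BIC, AICc, and CDF evaluated at the plotting points d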
self.__Gamma_3P_params = Fit_Gamma_3P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Gamma_3P_alpha = self.__Gamma_3P_params.alpha
self.Gamma_3P_beta = self.__Gamma_3P_params.beta
self.Gamma_3P_gamma = self.__Gamma_3P_params.gamma
self.Gamma_3P_BIC = self.__Gamma_3P_params.BIC
self.Gamma_3P_AICc = self.__Gamma_3P_params.AICc
self._parametric_CDF_Gamma_3P = self.__Gamma_3P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Expon_2P_params = Fit_Expon_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Expon_2P_lambda = self.__Expon_2P_params.Lambda
self.Expon_2P_gamma = self.__Expon_2P_params.gamma
self.Expon_2P_BIC = self.__Expon_2P_params.BIC
self.Expon_2P_AICc = self.__Expon_2P_params.AICc
self._parametric_CDF_Exponential_2P = self.__Expon_2P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Lognormal_3P_params = Fit_Lognormal_3P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Lognormal_3P_mu = self.__Lognormal_3P_params.mu
self.Lognormal_3P_sigma = self.__Lognormal_3P_params.sigma
self.Lognormal_3P_gamma = self.__Lognormal_3P_params.gamma
self.Lognormal_3P_BIC = self.__Lognormal_3P_params.BIC
self.Lognormal_3P_AICc = self.__Lognormal_3P_params.AICc
self._parametric_CDF_Lognormal_3P = self.__Lognormal_3P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Normal_2P_params = Fit_Normal_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Normal_2P_mu = self.__Normal_2P_params.mu
self.Normal_2P_sigma = self.__Normal_2P_params.sigma
self.Normal_2P_BIC = self.__Normal_2P_params.BIC
self.Normal_2P_AICc = self.__Normal_2P_params.AICc
self._parametric_CDF_Normal_2P = self.__Normal_2P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Lognormal_2P_params = Fit_Lognormal_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Lognormal_2P_mu = self.__Lognormal_2P_params.mu