# NOTE(review): the line below was injected non-code text (a scanner advertisement); kept as a comment so the file stays parseable.
# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
runs += 1
if inv is True:
result = minimize(value_and_grad(Fit_Expon_2P.LL_inv), guess, args=(failures, right_censored), jac=True, method='L-BFGS-B', bounds=bnds2)
if result.success is False or inv is False:
if runs == 1:
guess = [1 / sp[1], self.gamma] # fix the guess to be the non-inverted form
self.initial_guess = guess
result = minimize(value_and_grad(Fit_Expon_2P.LL), guess, args=(failures, right_censored), jac=True, method='L-BFGS-B', bounds=bnds2)
inv = False # inversion status changed for subsequent loops
params = result.x
guess = [params[0], params[1]]
if inv is False:
LL2 = 2 * Fit_Expon_2P.LL(guess, failures, right_censored)
else:
LL2 = 2 * Fit_Expon_2P.LL_inv(guess, failures, right_censored)
BIC_array.append(np.log(n) * k + LL2)
delta_BIC = abs(BIC_array[-1] - BIC_array[-2])
if result.success is True:
params = result.x
self.success = True
if inv is False:
self.Lambda = params[0]
else:
self.Lambda = 1 / params[0]
self.gamma = params[1]
else:
self.success = False
print('WARNING: Fitting using Autograd FAILED for Expon_2P. The fit from Scipy was used instead so results may not be accurate.')
sp = ss.expon.fit(all_data, optimizer='powell')
self.Lambda = sp[1]
label = kwargs.pop('label')
else:
label = str('Fitted Exponential_1P (λ=' + str(round_to_decimals(Lambda, dec)) + ')')
if 'color' in kwargs: ####
data_color = kwargs.get('color') ####
else: ####
data_color = 'k' ####
xlabel = 'Time' ####
elif fit_gamma is True:
if __fitted_dist_params is not None:
Lambda = __fitted_dist_params.Lambda
Lambda_SE = __fitted_dist_params.Lambda_SE ####
gamma = __fitted_dist_params.gamma ####
else:
from reliability.Fitters import Fit_Expon_2P
fit = Fit_Expon_2P(failures=failures, right_censored=right_censored, CI=CI, show_probability_plot=False, print_results=False)
Lambda = fit.Lambda
Lambda_SE = fit.Lambda_SE ####
gamma = fit.gamma ####
if 'label' in kwargs:
label = kwargs.pop('label')
else:
label = str('Fitted Exponential_2P\n(λ=' + str(round_to_decimals(Lambda, dec)) + ', γ=' + str(round_to_decimals(gamma, dec)) + ')')
if 'color' in kwargs: ####
data_color = kwargs.get('color') ####
else: ####
data_color = 'k' ####
xlabel = 'Time - gamma' ####
failures = failures - gamma + 0.009 # this 0.009 adjustment is to avoid taking the log of 0. It causes negligible difference to the fit and plot. 0.009 is chosen to be the same as Weibull_Fit_3P adjustment.
if right_censored is not None:
right_censored = right_censored - gamma + 0.009 # this 0.009 adjustment is to avoid taking the log of 0. It causes negligible difference to the fit and plot. 0.009 is chosen to be the same as Weibull_Fit_3P adjustment.
def LL(params, T_f, T_rc):
    """Negative log-likelihood for the 2-parameter Exponential distribution.

    Parameters
    ----------
    params : sequence
        [Lambda, gamma] — the (non-inverted) Exponential parameters.
    T_f : array_like
        Failure times.
    T_rc : array_like
        Right-censored times.

    Returns
    -------
    float
        -(sum of log-PDF over failures + sum of log-survival over censored
        times), suitable for minimization.
    """
    # Direct assignment instead of the redundant "init to 0 then +=" pattern:
    # each term is produced by a single vectorized .sum() call.
    LL_f = Fit_Expon_2P.logf(T_f, params[0], params[1]).sum()  # failure times
    LL_rc = Fit_Expon_2P.logR(T_rc, params[0], params[1]).sum()  # right censored times
    return -(LL_f + LL_rc)
# The reason for having an inverted and non-inverted cases is due to the gradient being too shallow in some cases. If Lambda<1 we invert it so it's bigger. This prevents the gradient getting too shallow for the optimizer to find the correct minimum.
while delta_BIC > 0.001 and runs < 5: # exits after BIC convergence or 5 iterations
runs += 1
if inv is True:
result = minimize(value_and_grad(Fit_Expon_2P.LL_inv), guess, args=(failures, right_censored), jac=True, method='L-BFGS-B', bounds=bnds2)
if result.success is False or inv is False:
if runs == 1:
guess = [1 / sp[1], self.gamma] # fix the guess to be the non-inverted form
self.initial_guess = guess
result = minimize(value_and_grad(Fit_Expon_2P.LL), guess, args=(failures, right_censored), jac=True, method='L-BFGS-B', bounds=bnds2)
inv = False # inversion status changed for subsequent loops
params = result.x
guess = [params[0], params[1]]
if inv is False:
LL2 = 2 * Fit_Expon_2P.LL(guess, failures, right_censored)
else:
LL2 = 2 * Fit_Expon_2P.LL_inv(guess, failures, right_censored)
BIC_array.append(np.log(n) * k + LL2)
delta_BIC = abs(BIC_array[-1] - BIC_array[-2])
if result.success is True:
params = result.x
self.success = True
if inv is False:
self.Lambda = params[0]
else:
self.Lambda = 1 / params[0]
self.gamma = params[1]
else:
self.success = False
print('WARNING: Fitting using Autograd FAILED for Expon_2P. The fit from Scipy was used instead so results may not be accurate.')
label = kwargs.pop('label')
else:
label = str('Fitted Exponential_1P (λ=' + str(round_to_decimals(Lambda, dec)) + ')')
if 'color' in kwargs:
data_color = kwargs.get('color')
else:
data_color = 'k'
xlabel = 'Time'
elif fit_gamma is True:
if __fitted_dist_params is not None:
Lambda = __fitted_dist_params.Lambda
Lambda_SE = __fitted_dist_params.Lambda_SE
gamma = __fitted_dist_params.gamma
else:
from reliability.Fitters import Fit_Expon_2P
fit = Fit_Expon_2P(failures=failures, right_censored=right_censored, CI=CI, show_probability_plot=False, print_results=False)
Lambda = fit.Lambda
Lambda_SE = fit.Lambda_SE
gamma = fit.gamma
if 'label' in kwargs:
label = kwargs.pop('label')
else:
label = str('Fitted Exponential_2P\n(λ=' + str(round_to_decimals(Lambda, dec)) + ', γ=' + str(round_to_decimals(gamma, dec)) + ')')
if 'color' in kwargs:
data_color = kwargs.get('color')
else:
data_color = 'k'
xlabel = 'Time - gamma'
failures = failures - gamma
if right_censored is not None:
right_censored = right_censored - gamma
def LL_inv(params, T_f, T_rc):
    """Negative log-likelihood for the 2-parameter Exponential with inverted Lambda.

    Identical to ``LL`` except the first parameter is 1/Lambda. The inversion
    is used when Lambda < 1 to steepen the gradient so L-BFGS-B does not stall
    on a near-flat surface (see the optimizer loop that alternates between the
    two forms).

    Parameters
    ----------
    params : sequence
        [1/Lambda, gamma] — note the inverted first parameter.
    T_f : array_like
        Failure times.
    T_rc : array_like
        Right-censored times.

    Returns
    -------
    float
        Negative total log-likelihood, suitable for minimization.
    """
    # Direct assignment instead of the redundant "init to 0 then +=" pattern.
    LL_f = Fit_Expon_2P.logf(T_f, 1 / params[0], params[1]).sum()  # failure times
    LL_rc = Fit_Expon_2P.logR(T_rc, 1 / params[0], params[1]).sum()  # right censored times
    return -(LL_f + LL_rc)
self.Weibull_3P_alpha = self.__Weibull_3P_params.alpha
self.Weibull_3P_beta = self.__Weibull_3P_params.beta
self.Weibull_3P_gamma = self.__Weibull_3P_params.gamma
self.Weibull_3P_BIC = self.__Weibull_3P_params.BIC
self.Weibull_3P_AICc = self.__Weibull_3P_params.AICc
self._parametric_CDF_Weibull_3P = self.__Weibull_3P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Gamma_3P_params = Fit_Gamma_3P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Gamma_3P_alpha = self.__Gamma_3P_params.alpha
self.Gamma_3P_beta = self.__Gamma_3P_params.beta
self.Gamma_3P_gamma = self.__Gamma_3P_params.gamma
self.Gamma_3P_BIC = self.__Gamma_3P_params.BIC
self.Gamma_3P_AICc = self.__Gamma_3P_params.AICc
self._parametric_CDF_Gamma_3P = self.__Gamma_3P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Expon_2P_params = Fit_Expon_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Expon_2P_lambda = self.__Expon_2P_params.Lambda
self.Expon_2P_gamma = self.__Expon_2P_params.gamma
self.Expon_2P_BIC = self.__Expon_2P_params.BIC
self.Expon_2P_AICc = self.__Expon_2P_params.AICc
self._parametric_CDF_Exponential_2P = self.__Expon_2P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Lognormal_3P_params = Fit_Lognormal_3P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Lognormal_3P_mu = self.__Lognormal_3P_params.mu
self.Lognormal_3P_sigma = self.__Lognormal_3P_params.sigma
self.Lognormal_3P_gamma = self.__Lognormal_3P_params.gamma
self.Lognormal_3P_BIC = self.__Lognormal_3P_params.BIC
self.Lognormal_3P_AICc = self.__Lognormal_3P_params.AICc
self._parametric_CDF_Lognormal_3P = self.__Lognormal_3P_params.distribution.CDF(xvals=d, show_plot=False)
self.__Normal_2P_params = Fit_Normal_2P(failures=failures, right_censored=right_censored, show_probability_plot=False, print_results=False)
self.Normal_2P_mu = self.__Normal_2P_params.mu