# Get scaler data; build the dataset for the sum of all detector channels
scaler_dict = get_scaler_set(img_dict)
scaler_name_list = list(scaler_dict.keys())
positions_dict = get_positions_set(img_dict)

# Generate dataset
dataset = copy.deepcopy(scaler_dict)
dataset.update(result_map_sum)

# Set parameters for quantitative normalization
param_quant_analysis.experiment_incident_energy = incident_energy_used
param_quant_analysis.experiment_distance_to_sample = quant_distance_to_sample
param_quant_analysis.experiment_detector_channel = "sum"

if save_txt is True:
    output_folder = 'output_txt_' + prefix_fname
    output_path = os.path.join(working_directory, output_folder)
    output_data(output_dir=output_path,
                interpolate_to_uniform_grid=interpolate_to_uniform_grid,
                dataset_name="dataset_fit",  # Sum of all detectors: should end with '_fit'
                quant_norm=quant_norm,
                param_quant_analysis=param_quant_analysis,
                dataset_dict=dataset, positions_dict=positions_dict,
                file_format="txt", scaler_name=scaler_name,
                scaler_name_list=scaler_name_list,
                use_average=use_average)

if save_tiff is True:
    output_folder = 'output_tiff_' + prefix_fname
    output_path = os.path.join(working_directory, output_folder)
    output_data(output_dir=output_path,
                interpolate_to_uniform_grid=interpolate_to_uniform_grid,
                dataset_name="dataset_fit",  # Sum of all detectors: should end with '_fit'
                quant_norm=quant_norm,
                param_quant_analysis=param_quant_analysis,
                dataset_dict=dataset, positions_dict=positions_dict,
                file_format="tiff", scaler_name=scaler_name,
                scaler_name_list=scaler_name_list,
                use_average=use_average)
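# NOTE: 'output_data' (pyxrf file I/O) writes each 2D map from 'dataset_dict'
# as a separate file in 'output_dir' in the selected 'file_format'; the exact
# file naming depends on the pyxrf version. 'positions_dict' supplies the x/y
# coordinate maps (used, e.g., when interpolating to a uniform grid).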
if fit_channel_each:
    # Reload the data, this time with the individual detector channels included
    img_dict, data_sets, mdata = read_hdf_APS(working_directory, file_name,
                                              spectrum_cut=spectrum_cut,
                                              load_each_channel=True)
    # Find the detector channels and the names of the channels
    det_channel_names = [_ for _ in data_sets.keys() if re.search(r"_det\d+$", _)]
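    # Example: with three detector channels loaded, 'data_sets' may contain keys
    # such as 'xrfmap_det1', 'xrfmap_det2' and 'xrfmap_det3' (the prefix depends
    # on the file), all of which match the r"_det\d+$" pattern above.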
    # Process each detector channel separately. The loop header and the
    # per-channel pixel fitting step that produces 'result_map_det' fall
    # outside this excerpt; an index-based loop is assumed here.
    for i in range(len(det_channel_names)):

        # ... per-channel fitting that fills 'result_map_det' ...

        scaler_dict = get_scaler_set(img_dict)
        scaler_name_list = list(scaler_dict.keys())
        positions_dict = get_positions_set(img_dict)

        # Generate dataset
        dataset = copy.deepcopy(scaler_dict)
        dataset.update(result_map_det)

        # Set parameters for quantitative normalization
        param_quant_analysis.experiment_incident_energy = incident_energy_used
        param_quant_analysis.experiment_distance_to_sample = quant_distance_to_sample
        param_quant_analysis.experiment_detector_channel = det_channel_names[i]
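        # 'dataset' now holds both the scaler maps (deep-copied so the originals
        # are not modified) and the per-channel fit maps, so both are exported
        # together by the calls below.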
        if save_txt is True:
            output_folder = 'output_txt_' + prefix_fname
            output_path = os.path.join(working_directory, output_folder)
            output_data(output_dir=output_path,
                        interpolate_to_uniform_grid=interpolate_to_uniform_grid,
                        dataset_name=f"dataset_{det_channel_names[i]}_fit",  # ..._det1_fit, etc.
                        quant_norm=quant_norm,
                        param_quant_analysis=param_quant_analysis,
                        dataset_dict=dataset, positions_dict=positions_dict,
                        file_format="txt", scaler_name=scaler_name,
                        scaler_name_list=scaler_name_list,
                        use_average=use_average)
        if save_tiff is True:
            output_folder = 'output_tiff_' + prefix_fname
            output_path = os.path.join(working_directory, output_folder)
            output_data(output_dir=output_path,
                        interpolate_to_uniform_grid=interpolate_to_uniform_grid,
                        dataset_name=f"dataset_{det_channel_names[i]}_fit",  # ..._det1_fit, etc.
                        quant_norm=quant_norm,
                        param_quant_analysis=param_quant_analysis,
                        dataset_dict=dataset, positions_dict=positions_dict,
                        file_format="tiff", scaler_name=scaler_name,
                        scaler_name_list=scaler_name_list,
                        use_average=use_average)

t1 = time.time()
print(f"Processing time: {t1 - t0}")
# --- Separate excerpt (from a class method; note the 'self' attributes) ---
if "positions" in self.img_dict:  # guard assumed; the excerpt begins mid-conditional
    positions_dict = self.img_dict["positions"]
else:
    positions_dict = {}

# Scalers are located in a separate dataset in 'img_dict'. They are also referenced
# in each '_fit' dataset (and in the selected dataset 'self.dict_to_plot').
# The list of scaler names is used to avoid attaching the detector channel name
# to file names that contain scaler data (scalers typically do not depend on
# the selection of detector channels).
scaler_dsets = [_ for _ in self.img_dict.keys() if re.search(r"_scaler$", _)]
if scaler_dsets:
    scaler_name_list = list(self.img_dict[scaler_dsets[0]].keys())
else:
    scaler_name_list = None

output_data(output_dir=output_dir,
            interpolate_to_uniform_grid=self.map_interpolation,
            dataset_name=self.img_title, quant_norm=self.quantitative_normalization,
            param_quant_analysis=self.param_quant_analysis,
            dataset_dict=self.dict_to_plot, positions_dict=positions_dict,
            file_format=file_format, scaler_name=scaler_v,
            scaler_name_list=scaler_name_list)
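# 'output_dir', 'file_format' and 'scaler_v' are defined by the enclosing
# method (likely its arguments) and are not part of this excerpt.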