def pre_init(self):
    self._embed = True

    # App design
    self.format_string = None
    self.interface_file = os.path.join(script_dir, 'yaml',
                                       'monitor_mast_interface.yaml')

    # Read the configuration and load the initial data
    self.settings = get_config()
    self.output_dir = self.settings['outputs']
    self.read_new_data()

    # Cache timestamp starts at the Unix epoch (no data cached yet)
    self.cache_time = Time(0., format='unix')

    self.jwst_bar_colors = self.caom_bar_colors = 3
    self.jwst_datacols = []
    self.caom_datacols = []
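# A minimal, standalone sketch of how an epoch-initialized cache timestamp like
# cache_time above can be used to decide when cached data is stale.
# CACHE_LIFETIME_SEC and cache_is_stale are illustrative names, not part of the
# original class.
from astropy.time import Time

CACHE_LIFETIME_SEC = 3600  # assumed refresh interval for this example

def cache_is_stale(cache_time):
    """Return True if the cached data is older than CACHE_LIFETIME_SEC."""
    return (Time.now().unix - cache_time.unix) > CACHE_LIFETIME_SEC

print(cache_is_stale(Time(0., format='unix')))  # True: an epoch-old cache is always stale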
    ``False`` if not.

    Returns
    -------
    image_info : dict
        A dictionary containing various information for the given
        ``file_root``.
    """
    # Initialize dictionary to store information
    image_info = {}
    image_info['all_jpegs'] = []
    image_info['suffixes'] = []
    image_info['num_ints'] = {}

    preview_dir = os.path.join(get_config()['jwql_dir'], 'preview_images')

    # Find all of the matching files
    dirname = file_root[:7]
    search_filepath = os.path.join(FILESYSTEM_DIR, dirname, file_root + '*.fits')
    image_info['all_files'] = glob.glob(search_filepath)

    for file in image_info['all_files']:
        # Get suffix information
        suffix = os.path.basename(file).split('_')[4].split('.')[0]
        image_info['suffixes'].append(suffix)

        # Determine JPEG file location
        jpg_dir = os.path.join(preview_dir, dirname)
        jpg_filename = os.path.basename(os.path.splitext(file)[0] + '_integ0.jpg')
        jpg_filepath = os.path.join(jpg_dir, jpg_filename)
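# Standalone illustration of the suffix parsing above: JWST-style filenames use
# underscore-separated fields, and the suffix (e.g. 'rate', 'cal') sits in the
# fifth field. The example filename below is made up for demonstration.
import os

example = '/some/path/jw12345001001_01101_00001_nrca1_rate.fits'
suffix = os.path.basename(example).split('_')[4].split('.')[0]
print(suffix)  # -> 'rate'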
    number of files.

    Parameters
    ----------
    filepaths : list
        A list of full paths to files of interest.

    Returns
    -------
    proposal_info : dict
        A dictionary containing various information about the
        proposal(s) and files corresponding to the given ``filepaths``.
    """
    proposals = list(set([f.split('/')[-1][2:7] for f in filepaths]))
    thumbnail_dir = os.path.join(get_config()['jwql_dir'], 'thumbnails')
    thumbnail_paths = []
    num_files = []

    for proposal in proposals:
        # Locate a representative thumbnail for the proposal
        thumbnail_search_filepath = os.path.join(thumbnail_dir, 'jw{}'.format(proposal), 'jw{}*rate*.thumb'.format(proposal))
        thumbnail = glob.glob(thumbnail_search_filepath)
        if len(thumbnail) > 0:
            thumbnail = thumbnail[0]
            thumbnail = '/'.join(thumbnail.split('/')[-2:])
        thumbnail_paths.append(thumbnail)

        # Count the FITS files that belong to the proposal
        fits_search_filepath = os.path.join(FILESYSTEM_DIR, 'jw{}'.format(proposal), 'jw{}*.fits'.format(proposal))
        num_files.append(len(glob.glob(fits_search_filepath)))

    # Put the various information into a dictionary of results
    proposal_info = {}
    proposal_info['num_proposals'] = len(proposals)
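# Standalone illustration of the proposal-ID extraction above: the slice [2:7]
# pulls the five-digit program number out of each filename's basename. The file
# paths below are made-up examples.
filepaths = [
    '/data/jw10012/jw10012001001_01101_00001_nrcb1_rate.fits',
    '/data/jw10012/jw10012002001_01101_00001_nrcb2_rate.fits',
    '/data/jw10333/jw10333001001_01101_00001_mirimage_rate.fits',
]
proposals = list(set([f.split('/')[-1][2:7] for f in filepaths]))
print(sorted(proposals))  # -> ['10012', '10333']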
def run(self):
    """The main method. See module docstrings for further
    details.
    """
    logging.info('Begin logging for readnoise_monitor\n')

    # Get the output directory and set up a directory to store the data
    self.output_dir = os.path.join(get_config()['outputs'], 'readnoise_monitor')
    ensure_dir_exists(os.path.join(self.output_dir, 'data'))

    # Use the current time as the end time for the MAST query
    self.query_end = Time.now().mjd

    # Loop over all instruments
    for instrument in ['nircam', 'niriss']:
        self.instrument = instrument

        # Identify which database tables to use
        self.identify_tables()

        # Get a list of all possible apertures for this instrument
        siaf = Siaf(self.instrument)
        possible_apertures = list(siaf.apertures)
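# Standalone sketch of the pysiaf call used above: Siaf(instrument) loads the
# Science Instrument Aperture File for that instrument, and iterating its
# .apertures mapping yields the aperture names. Requires pysiaf to be installed.
from pysiaf import Siaf

siaf = Siaf('nircam')
possible_apertures = list(siaf.apertures)
print(len(possible_apertures), possible_apertures[:3])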
def get_dashboard_components():
    """Build and return dictionaries containing components and HTML
    needed for the dashboard.

    Returns
    -------
    dashboard_components : dict
        A dictionary containing components needed for the dashboard.
    dashboard_html : dict
        A dictionary containing full HTML needed for the dashboard.
    """
    output_dir = get_config()['outputs']
    name_dict = {'': '',
                 'monitor_mast': 'Database Monitor',
                 'monitor_filesystem': 'Filesystem Monitor'}

    # Run the cron job monitor to produce an updated table
    monitor_cron_jobs.status(production_mode=True)

    # Build a dictionary of Bokeh components from files in the output directory
    dashboard_components = {}
    for dir_name, _, file_list in os.walk(output_dir):
        monitor_name = os.path.basename(dir_name)

        # Only continue if the dashboard knows how to build that monitor
        if monitor_name in name_dict.keys():
            formatted_monitor_name = name_dict[monitor_name]
            dashboard_components[formatted_monitor_name] = {}
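# Standalone sketch of the discovery loop above: os.walk visits every
# subdirectory under a root directory, and only basenames that appear in
# name_dict become dashboard entries. The root path '.' is illustrative; any
# existing directory works.
import os

name_dict = {'monitor_mast': 'Database Monitor',
             'monitor_filesystem': 'Filesystem Monitor'}

for dir_name, _, file_list in os.walk('.'):
    monitor_name = os.path.basename(dir_name)
    if monitor_name in name_dict:
        print(name_dict[monitor_name], '->', len(file_list), 'files')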
def main():
    # Generate paths
    DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database')
    DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'nirspec_database.db')

    # Connect to the temporary database
    conn = sql.create_connection(DATABASE_FILE)

    # Process every file in the list above
    for path in filenames:
        process_file(conn, path)

    # Close the connection
    sql.close_connection(conn)
    print('done')
logging.info('Completed database search for {} instruments and {} data products.'.
             format(instruments, dataproducts))

# Make the table
all_cols = ['instrument'] + dataproducts + ['total']
table = pd.DataFrame(inventory, columns=all_cols)

# Melt the table
table = pd.melt(table, id_vars=['instrument'],
                value_vars=dataproducts,
                value_name='files', var_name='dataproduct')

# Plot it
if plot:
    # Determine plot location and names
    output_dir = get_config()['outputs']
    if caom:
        output_filename = 'database_monitor_caom'
    else:
        output_filename = 'database_monitor_jwst'

    # Make the plot
    plt = Donut(table, label=['instrument', 'dataproduct'], values='files',
                text_font_size='12pt', hover_text='files',
                name="JWST Inventory", plot_width=600, plot_height=600)

    # Save the plot as full html
    html_filename = output_filename + '.html'
    outfile = os.path.join(output_dir, 'monitor_mast', html_filename)
    output_file(outfile)
    save(plt)
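# Standalone illustration of the pd.melt reshape above: a wide table with one
# column per data product is converted to long form, one row per
# (instrument, dataproduct) pair. The instruments and counts below are invented.
import pandas as pd

dataproducts = ['image', 'spectrum']
wide = pd.DataFrame({'instrument': ['nircam', 'miri'],
                     'image': [10, 4],
                     'spectrum': [2, 6],
                     'total': [12, 10]})
long = pd.melt(wide, id_vars=['instrument'], value_vars=dataproducts,
               value_name='files', var_name='dataproduct')
print(long)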
def run(self):
    """The main method. See module docstrings for further details.

    There are two parts to the bad pixel monitor:

    1. Bad pixels from illuminated data
    2. Bad pixels from dark data

    For each, we will query MAST, copy new files from the filesystem,
    and pass the list of copied files into the ``process()`` method.
    """
    logging.info('Begin logging for bad_pixel_monitor')

    # Get the output directory
    self.output_dir = os.path.join(get_config()['outputs'], 'bad_pixel_monitor')

    # Read in the config file that defines the thresholds for the number
    # of dark files that must be present in order for the monitor to run
    limits = ascii.read(THRESHOLDS_FILE)

    # Use the current time as the end time for the MAST query
    self.query_end = Time.now().mjd

    # Loop over all instruments
    for instrument in JWST_INSTRUMENT_NAMES:
        self.instrument = instrument

        # Identify which database tables to use
        self.identify_tables()

        # Get a list of all possible apertures from pysiaf
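# The THRESHOLDS_FILE read above is a plain ASCII table, which astropy.io.ascii
# can parse directly. A self-contained example using an in-memory table (the
# column names and values are invented for this sketch):
from astropy.io import ascii

lines = ['Instrument Threshold',
         'nircam     10',
         'miri       5']
limits = ascii.read(lines)
print(limits['Threshold'])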
def main():
    # Generate paths
    DATABASE_LOCATION = os.path.join(get_config()['jwql_dir'], 'database')
    DATABASE_FILE = os.path.join(DATABASE_LOCATION, 'miri_database.db')

    # Connect to the temporary database
    conn = sql.create_connection(DATABASE_FILE)

    # Process every CSV file in the directory
    for path in paths:
        process_file(conn, path)

    # Close the connection
    sql.close_connection(conn)
    print('done')
def _dark_mean_image(self):
    """Update Bokeh objects with mean dark image data."""
    # Open the mean dark current file and get the data
    mean_dark_image_file = self.pixel_table[-1].mean_dark_image_file
    mean_slope_dir = os.path.join(get_config()['outputs'], 'dark_monitor', 'mean_slope_images')
    mean_dark_image_path = os.path.join(mean_slope_dir, mean_dark_image_file)
    with fits.open(mean_dark_image_path) as hdulist:
        data = hdulist[1].data

    # Update the plot with the data and boundaries
    y_size, x_size = np.shape(data)
    self.refs["mean_dark_source"].data['image'] = [data]
    self.refs["stamp_xr"].end = x_size
    self.refs["stamp_yr"].end = y_size
    self.refs["mean_dark_source"].data['dw'] = [x_size]
    self.refs["mean_dark_source"].data['dh'] = [y_size]  # display height follows the y size

    # Set the image color scale
    self.refs["log_mapper"].high = 0
    self.refs["log_mapper"].low = -.2