def test_validation_n2_k2_temporal_matching_no_matches():

    tst_results = {}

    datasets = setup_two_without_overlap()

    dm = DataManager(datasets, 'DS1',
                     read_ts_names={d: 'read' for d in ['DS1', 'DS2', 'DS3']})

    process = Validation(
        dm, 'DS1',
        temporal_matcher=temporal_matchers.BasicTemporalMatching(
            window=1 / 24.0).combinatory_matcher,
        scaling='lin_cdf_match',
        metrics_calculators={
            (2, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics})

    jobs = process.get_processing_jobs()
    for job in jobs:
        results = process.calc(*job)
        assert sorted(list(results)) == sorted(list(tst_results))
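
# For orientation, a minimal sketch of what a fixture in the spirit of
# setup_two_without_overlap could look like (hypothetical names and shapes,
# not the actual pytesmo helper): two series whose time indices never
# intersect, so the 1-hour matching window above can never pair observations
# and the results stay empty.
import numpy as np
import pandas as pd


def sketch_two_without_overlap():
    # DS1 covers early 2007, DS2 covers early 2008: no timestamps fall
    # within an hour of each other across the two series
    idx1 = pd.date_range('2007-01-01', periods=100, freq='D')
    idx2 = pd.date_range('2008-01-01', periods=100, freq='D')
    ds1 = pd.DataFrame({'x': np.arange(100.)}, index=idx1)
    ds2 = pd.DataFrame({'y': np.arange(100.)}, index=idx2)
    return ds1, ds2
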
mds = {
    'masking1': {
        'class': mds1,
        'columns': ['x'],
        'args': [],
        'kwargs': {'limit': 500},
        'use_lut': False,
        'grids_compatible': True},
    'masking2': {
        'class': mds2,
        'columns': ['x'],
        'args': [],
        'kwargs': {'limit': 1000},
        'use_lut': False,
        'grids_compatible': True}
}
process = Validation(
    datasets, 'DS1',
    temporal_matcher=temporal_matchers.BasicTemporalMatching(
        window=1 / 24.0).combinatory_matcher,
    scaling='lin_cdf_match',
    metrics_calculators={
        (3, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics},
    masking_datasets=mds)
gpi_info = (1, 1, 1)
ref_df = datasets['DS1']['class'].read(1)
with warnings.catch_warnings():
    warnings.filterwarnings('ignore', category=DeprecationWarning)
    new_ref_df = process.mask_dataset(ref_df, gpi_info)

# with limits of 500 and 1000 every reference observation is masked out, so
# the masked reference is empty (np.arange(1000, 1000) is an empty array)
assert len(new_ref_df) == 0
nptest.assert_allclose(new_ref_df.x.values, np.arange(1000, 1000))
jobs = process.get_processing_jobs()
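
# Rough illustration of the masking idea used above (a sketch, not pytesmo
# internals; the mask direction x < limit is an assumption): each masking
# dataset contributes a boolean condition, and reference rows flagged by any
# of them are dropped before temporal matching.
import numpy as np
import pandas as pd

ref = pd.DataFrame({'x': np.arange(1000.)})
mask1 = ref['x'] < 500    # stand-in for the 'limit': 500 masking dataset
mask2 = ref['x'] < 1000   # stand-in for the 'limit': 1000 masking dataset
masked_ref = ref[~(mask1 | mask2)]
assert len(masked_ref) == 0  # mirrors the assertion in the test above
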
    'tau': np.array([np.nan], dtype=np.float32),
    'gpi': np.array([4], dtype=np.int32),
    'RMSD': np.array([0.], dtype=np.float32),
    'lon': np.array([4.]),
    'p_tau': np.array([np.nan], dtype=np.float32),
    'BIAS': np.array([0.], dtype=np.float32),
    'p_rho': np.array([0.], dtype=np.float32),
    'rho': np.array([1.], dtype=np.float32),
    'lat': np.array([4.]),
    'R': np.array([1.], dtype=np.float32),
    'p_R': np.array([0.], dtype=np.float32)}}

datasets = setup_TestDatasets()

dm = DataManager(datasets, 'DS1',
                 read_ts_names={d: 'read' for d in ['DS1', 'DS2', 'DS3']})

process = Validation(
    dm, 'DS1',
    temporal_matcher=temporal_matchers.BasicTemporalMatching(
        window=1 / 24.0).combinatory_matcher,
    scaling='lin_cdf_match',
    metrics_calculators={
        (3, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics})

jobs = process.get_processing_jobs()
for job in jobs:
    results = process.calc(*job)
    assert sorted(list(results)) == sorted(list(tst_results))

    'tau': np.array([np.nan], dtype=np.float32),
    'gpi': np.array([4], dtype=np.int32),
    'RMSD': np.array([0.], dtype=np.float32),
    'lon': np.array([4.]),
    'p_tau': np.array([np.nan], dtype=np.float32),
    'BIAS': np.array([0.], dtype=np.float32),
    'p_rho': np.array([0.], dtype=np.float32),
    'rho': np.array([1.], dtype=np.float32),
    'lat': np.array([4.]),
    'R': np.array([1.], dtype=np.float32),
    'p_R': np.array([0.], dtype=np.float32)}}

datasets = setup_TestDatasets()

dm = DataManager(datasets, 'DS1',
                 read_ts_names={d: 'read' for d in ['DS1', 'DS2', 'DS3']})

process = Validation(
    dm, 'DS1',
    temporal_matcher=temporal_matchers.BasicTemporalMatching(
        window=1 / 24.0).combinatory_matcher,
    scaling='lin_cdf_match',
    metrics_calculators={
        (2, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics})

jobs = process.get_processing_jobs()
for job in jobs:
    results = process.calc(*job)
    assert sorted(list(results)) == sorted(list(tst_results))
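
# Note on the metrics_calculators keys: in pytesmo each key is a tuple (n, k),
# where n is the number of datasets that are temporally matched together and k
# is the number of matched datasets passed to the metric calculator at once.
# (2, 2) pairs the reference with one other dataset; (3, 2) matches three
# datasets but still computes metrics over pairs. Several entries can be
# combined in a single dict, e.g.:
#
#     metrics_calculators={
#         (2, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics,
#         (3, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics,
#     }
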
datasets = {
    'ISMN': {
        'class': ismn_reader,
        'columns': ['soil moisture'],
    },
    'ASCAT': {
        'class': ascat_reader,
        'columns': ['sm'],
        'kwargs': {'mask_frozen_prob': 80,
                   'mask_snow_prob': 80,
                   'mask_ssf': True},
    }}
read_ts_names = {'ASCAT': 'read', 'ISMN': 'read_ts'}
period = [datetime(2007, 1, 1), datetime(2014, 12, 31)]

datasets = DataManager(datasets, 'ISMN', period,
                       read_ts_names=read_ts_names)

process = Validation(
    datasets, 'ISMN',
    temporal_ref='ASCAT',
    scaling='lin_cdf_match',
    scaling_ref='ASCAT',
    metrics_calculators={
        (2, 2): metrics_calculators.BasicMetrics(
            other_name='k1',
            metadata_template=metadata_dict_template).calc_metrics},
    period=period)
jobs = process.get_processing_jobs()  # assumed: restored from the pattern of the other snippets
for job in jobs:
    results = process.calc(*job)
    netcdf_results_manager(results, save_path)

results_fname = os.path.join(
    save_path, 'ASCAT.sm_with_ISMN.soil moisture.nc')

vars_should = [u'n_obs', u'tau', u'gpi', u'RMSD', u'lon', u'p_tau',
               u'BIAS', u'p_rho', u'rho', u'lat', u'R', u'p_R']
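
# Sketch of the follow-up check: open the written file and compare variables
# (assumes the netCDF4 package; fields from metadata_dict_template may be
# present as well, hence the subset comparison):
import netCDF4

with netCDF4.Dataset(results_fname) as results:
    assert set(vars_should).issubset(results.variables.keys())
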
datasets = {
    'ISMN': {
        'class': ismn_reader,
        'columns': ['soil moisture']
    },
    'ASCAT': {
        'class': ascat_reader,
        'columns': ['sm'],
        'kwargs': {'mask_frozen_prob': 80,
                   'mask_snow_prob': 80,
                   'mask_ssf': True}
    }}
read_ts_names = {'ASCAT': 'read', 'ISMN': 'read_ts'}
period = [datetime(2007, 1, 1), datetime(2014, 12, 31)]

datasets = DataManager(datasets, 'ISMN', period,
                       read_ts_names=read_ts_names)

process = Validation(
    datasets, 'ISMN',
    temporal_ref='ASCAT',
    scaling='lin_cdf_match',
    scaling_ref='ASCAT',
    metrics_calculators={
        (2, 2): metrics_calculators.RollingMetrics(
            other_name='k1',
            metadata_template=metadata_dict_template).calc_metrics},
    period=period)
jobs = process.get_processing_jobs()  # assumed: restored from the pattern of the other snippets
for job in jobs:
    results = process.calc(*job)
    netcdf_results_manager(results, save_path,
                           ts_vars=['R', 'p_R', 'RMSD'])

results_fname = os.path.join(
    save_path, 'ASCAT.sm_with_ISMN.soil moisture.nc')
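
# Sketch for peeking at the rolling-metric time series written above via
# ts_vars (assumes the netCDF4 package; the exact layout depends on
# netcdf_results_manager):
import netCDF4

with netCDF4.Dataset(results_fname) as results:
    for name in ('R', 'p_R', 'RMSD'):
        if name in results.variables:
            print(name, results.variables[name].shape)
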
datasets = {
    'ISMN': {
        'class': ismn_reader,
        'columns': ['soil moisture']
    },
    'ASCAT': {
        'class': ascat_reader,
        'columns': ['sm'],
        'kwargs': {'mask_frozen_prob': 80,
                   'mask_snow_prob': 80,
                   'mask_ssf': True}
    }}
read_ts_names = {'ASCAT': 'read', 'ISMN': 'read_ts'}
period = [datetime(2007, 1, 1), datetime(2014, 12, 31)]

datasets = DataManager(datasets, 'ISMN', period,
                       read_ts_names=read_ts_names)

process = Validation(
    datasets, 'ISMN',
    temporal_ref='ASCAT',
    scaling='lin_cdf_match',
    scaling_ref='ASCAT',
    metrics_calculators={
        (2, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics},
    period=period)
jobs = process.get_processing_jobs()  # assumed: restored from the pattern of the other snippets
for job in jobs:
    results = process.calc(*job)
    netcdf_results_manager(results, save_path)

results_fname = os.path.join(
    save_path, 'ASCAT.sm_with_ISMN.soil moisture.nc')

vars_should = [u'n_obs', u'tau', u'gpi', u'RMSD', u'lon', u'p_tau',
               u'BIAS', u'p_rho', u'rho', u'lat', u'R', u'p_R']

    'tau': np.array([np.nan], dtype=np.float32),
    'gpi': np.array([4], dtype=np.int32),
    'RMSD': np.array([0.], dtype=np.float32),
    'lon': np.array([4.]),
    'p_tau': np.array([np.nan], dtype=np.float32),
    'BIAS': np.array([0.], dtype=np.float32),
    'p_rho': np.array([0.], dtype=np.float32),
    'rho': np.array([1.], dtype=np.float32),
    'lat': np.array([4.]),
    'R': np.array([1.], dtype=np.float32),
    'p_R': np.array([0.], dtype=np.float32)}}

datasets = setup_three_with_two_overlapping()

dm = DataManager(datasets, 'DS1',
                 read_ts_names={d: 'read' for d in ['DS1', 'DS2', 'DS3']})

process = Validation(
    dm, 'DS1',
    temporal_matcher=temporal_matchers.BasicTemporalMatching(
        window=1 / 24.0).combinatory_matcher,
    scaling='lin_cdf_match',
    metrics_calculators={
        (2, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics})

jobs = process.get_processing_jobs()
for job in jobs:
    results = process.calc(*job)
    assert sorted(list(results)) == sorted(list(tst_results))

    'gpi': np.array([4], dtype=np.int32),
    'RMSD': np.array([0.], dtype=np.float32),
    'lon': np.array([4.]),
    'p_tau': np.array([np.nan], dtype=np.float32),
    'BIAS': np.array([0.], dtype=np.float32),
    'p_rho': np.array([0.], dtype=np.float32),
    'rho': np.array([1.], dtype=np.float32),
    'lat': np.array([4.]),
    'R': np.array([1.], dtype=np.float32),
    'p_R': np.array([0.], dtype=np.float32)}}

datasets = setup_TestDatasets()

dm = DataManager(datasets, 'DS1',
                 read_ts_names={d: 'read' for d in ['DS1', 'DS2', 'DS3']})

process = Validation(
    dm, 'DS1',
    temporal_matcher=temporal_matchers.BasicTemporalMatching(
        window=1 / 24.0).combinatory_matcher,
    scaling='lin_cdf_match',
    metrics_calculators={
        (2, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics})

jobs = process.get_processing_jobs()
for job in jobs:
    results = process.calc(*job)
    assert sorted(list(results)) == sorted(list(tst_results))
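
# With a (2, 2) entry over three datasets, the combinatory matcher produces
# one result set per reference/other pairing, so tst_results above is keyed
# by tuples such as (('DS1', 'x'), ('DS2', 'y')) and (('DS1', 'x'), ('DS3', 'y'))
# (illustrative keys; the exact column combinations depend on the fixture).
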
mds = {
    'masking1': {
        'class': mds1,
        'columns': ['x'],
        'args': [],
        'kwargs': {'limit': 500},
        'use_lut': False,
        'grids_compatible': True},
    'masking2': {
        'class': mds2,
        'columns': ['x'],
        'args': [],
        'kwargs': {'limit': 750},
        'use_lut': False,
        'grids_compatible': True}
}
process = Validation(
    datasets, 'DS1',
    temporal_matcher=temporal_matchers.BasicTemporalMatching(
        window=1 / 24.0).combinatory_matcher,
    scaling='lin_cdf_match',
    metrics_calculators={
        (3, 2): metrics_calculators.BasicMetrics(other_name='k1').calc_metrics},
    masking_datasets=mds)
gpi_info = (1, 1, 1)
ref_df = datasets['DS1']['class'].read(1)
with warnings.catch_warnings():
    # read_ts is hard coded when using mask_data
    warnings.simplefilter('ignore', category=DeprecationWarning)
    new_ref_df = process.mask_dataset(ref_df, gpi_info)

# with limits of 500 and 750 the surviving reference values are 750..999,
# i.e. exactly 250 rows
assert len(new_ref_df) == 250
nptest.assert_allclose(new_ref_df.x.values, np.arange(750, 1000))
jobs = process.get_processing_jobs()
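
# get_processing_jobs() returns one job per grid cell of the spatial
# reference; each job bundles grid point ids with their coordinates, which is
# why the tests above unpack it directly, i.e. process.calc(*job) is
# effectively process.calc(gpis, lons, lats).
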