How to use climpred - 10 common examples

To help you get started, we’ve selected a few climpred examples based on popular ways it is used in public projects.

github bradyrx / climpred / climpred / test_prediction.py
def test_compute_perfect_model_ds_not_nan(PM_ds_ds, PM_ds_control, metric, comparison):
    actual = compute_perfect_model(
        PM_ds_ds, PM_ds_control, metric=metric, comparison=comparison).isnull().any()
    assert not actual
github bradyrx / climpred / climpred / test_prediction.py
def test_compute_perfect_model_da_not_nan(PM_da_ds, PM_da_control, metric, comparison):
    actual = compute_perfect_model(
        PM_da_ds, PM_da_control, metric=metric, comparison=comparison).isnull().any()
    assert not actual
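
Both tests take `metric` and `comparison` as arguments; in climpred's test suite these are typically supplied by `pytest.mark.parametrize`. A minimal sketch of that pattern, with hypothetical metric and comparison subsets (the real suite draws them from climpred's registries):

import pytest

from climpred.prediction import compute_perfect_model


@pytest.mark.parametrize('comparison', ['m2e', 'm2m'])  # hypothetical subset
@pytest.mark.parametrize('metric', ['mse', 'pearson_r'])  # hypothetical subset
def test_compute_perfect_model_ds_not_nan(PM_ds_ds, PM_ds_control, metric, comparison):
    actual = compute_perfect_model(
        PM_ds_ds, PM_ds_control, metric=metric, comparison=comparison
    ).isnull().any()
    assert not actual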
github bradyrx / climpred / climpred / metrics.py
        +-----------------+-----------------+
        | **maximum**     | ∞               |
        +-----------------+-----------------+
        | **perfect**     | N/A             |
        +-----------------+-----------------+
        | **orientation** | positive        |
        +-----------------+-----------------+

    Reference:
        * Bretherton, Christopher S., et al. "The effective number of spatial degrees of
          freedom of a time-varying field." Journal of Climate 12.7 (1999): 1990-2009.
    """
    skipna = metric_kwargs.get('skipna', False)
    return effective_sample_size(forecast, verif, dim=dim, skipna=skipna)


__effective_sample_size = Metric(
    name='effective_sample_size',
    function=_effective_sample_size,
    positive=True,
    probabilistic=False,
    unit_power=0.0,
    long_name='Effective sample size for temporally correlated data',
    aliases=['n_eff', 'eff_n'],
    minimum=0.0,
    maximum=np.inf,
)
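
# Hedged usage sketch (not part of the climpred source above): the wrapped
# ``effective_sample_size`` presumably comes from xskillscore. Strongly
# autocorrelated series have far fewer effective degrees of freedom than
# their raw length; synthetic random walks for illustration.
import numpy as np
import xarray as xr
from xskillscore import effective_sample_size

a = xr.DataArray(np.cumsum(np.random.randn(200)), dims='time')
b = xr.DataArray(np.cumsum(np.random.randn(200)), dims='time')
n_eff = effective_sample_size(a, b, dim='time')  # typically well below 200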


def _pearson_r_eff_p_value(forecast, verif, dim=None, **metric_kwargs):
    """Probability that forecast and verification data are linearly uncorrelated, accounting
    for autocorrelation.
github bradyrx / climpred / climpred / metrics.py
Climate Dynamics, June 9, 2018. https://doi.org/10/gd7hfq.
    """
    mse_skill = __mse.function(forecast, verif, dim=dim, **metric_kwargs)
    var = verif.var(dim)
    if 'comparison' in metric_kwargs:
        comparison = metric_kwargs['comparison']
    else:
        raise ValueError(
            f'Comparison needed to normalize MSESS; not found in {metric_kwargs}.'
        )
    fac = _get_norm_factor(comparison)
    msess_skill = 1 - mse_skill / var / fac
    return msess_skill


__msess = Metric(
    name='msess',
    function=_msess,
    positive=True,
    probabilistic=False,
    unit_power=0,
    long_name='Mean Squared Error Skill Score',
    aliases=['ppp', 'msss'],
    minimum=-np.inf,
    maximum=1.0,
    perfect=1.0,
)
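
# Hedged by-hand sketch (not part of the climpred source above):
# MSESS = 1 - MSE / (variance * fac), so a perfect forecast scores 1 and
# a forecast no better than the verification variance scores <= 0. The
# normalization factor is taken as 1 and the data are synthetic.
import numpy as np
import xarray as xr

verif = xr.DataArray(np.random.randn(100), dims='time')
forecast = verif + 0.1 * np.random.randn(100)
mse = ((forecast - verif) ** 2).mean('time')
msess = 1 - mse / verif.var('time')  # close to 1 for this skillful forecast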


def _mape(forecast, verif, dim=None, **metric_kwargs):
    """Mean Absolute Percentage Error (MAPE).
github bradyrx / climpred / climpred / utils.py
This allows the user to submit a string representing the desired metric
    and have it mapped to the corresponding Metric class.

    Currently compatible with functions:
    * compute_persistence()
    * compute_perfect_model()
    * compute_hindcast()

    Args:
        metric (str): name of metric.
        list_ (list): list of allowed metric names to check against.

    Returns:
        metric (Metric): class object of the metric.
    """
    if isinstance(metric, metrics.Metric):
        return metric
    elif isinstance(metric, str):
        # check if metric allowed
        is_in_list(metric, list_, 'metric')
        metric = METRIC_ALIASES.get(metric, metric)
        return getattr(metrics, '__' + metric)
    else:
        raise ValueError(
            f'Please provide metric as str or Metric class, found {type(metric)}'
        )
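
This snippet is from climpred/utils.py; the enclosing function's signature is cut off above, but in climpred it is `get_metric_class(metric, list_)` (name assumed here). A hedged sketch of the resolution behavior:

from climpred import metrics
from climpred.utils import get_metric_class  # enclosing function; name assumed

# A plain string resolves to the registered Metric instance, while a Metric
# instance passes through unchanged. The allow-list is a hypothetical subset.
m = get_metric_class('mse', ['mse', 'rmse'])
assert isinstance(m, metrics.Metric)
assert get_metric_class(m, ['mse', 'rmse']) is m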
github bradyrx / climpred / climpred / bias_reduction.py
should be based on the same set of verification dates.
        cross_validate (bool): Use the properly defined mean bias reduction
            function, which excludes the given initialization from the bias
            calculation. With False, include the given initialization in the
            calculation, which is much faster but slightly overestimates skill
            with a large N of initializations. Defaults to True.

    Returns:
        HindcastEnsemble: bias reduced hindcast.

    """

    def bias_func(a, b, **kwargs):
        return a - b

    # Metric(name, function, positive, probabilistic, unit_power)
    bias_metric = Metric('bias', bias_func, True, False, 1)

    bias = hindcast.verify(
        metric=bias_metric, comparison='e2r', dim=None, alignment=alignment
    ).squeeze()

    if cross_validate:
        mean_bias_func = _mean_bias_reduction_cross_validate
    else:
        mean_bias_func = _mean_bias_reduction_quick

    bias_reduced_hind = mean_bias_func(hindcast._datasets['initialized'], bias, 'time')
    hindcast_bias_reduced = hindcast.copy()
    hindcast_bias_reduced._datasets['initialized'] = bias_reduced_hind
    return hindcast_bias_reduced
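
The enclosing function's signature is cut off above, but per the docstring it takes a `HindcastEnsemble` plus `alignment` and `cross_validate` arguments; in climpred it is `mean_bias_reduction` (name assumed here). A hedged usage sketch with a hypothetical `hindcast` object:

from climpred.bias_reduction import mean_bias_reduction  # name assumed

# hindcast: a hypothetical HindcastEnsemble with verification data added;
# 'same_verifs' is one of climpred's alignment options.
hindcast_br = mean_bias_reduction(
    hindcast, alignment='same_verifs', cross_validate=True
)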
github bradyrx / climpred / climpred / metrics.py
Reference:
        * Matheson, James E., and Robert L. Winkler. “Scoring Rules for
          Continuous Probability Distributions.” Management Science 22, no. 10
          (June 1, 1976): 1087–96. https://doi.org/10/cwwt4g.
        * https://www.lokad.com/continuous-ranked-probability-score

    See also:
        * properscoring.crps_ensemble
        * xskillscore.crps_ensemble
    """
    weights = metric_kwargs.get('weights', None)
    # argument order is switched because xskillscore expects
    # crps_ensemble(observations, forecasts)
    return crps_ensemble(verif, forecast, weights=weights)


__crps = Metric(
    name='crps',
    function=_crps,
    positive=False,
    probabilistic=True,
    unit_power=1.0,
    long_name='Continuous Ranked Probability Score',
    minimum=0.0,
    maximum=np.inf,
    perfect=0.0,
)
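
# Hedged direct-call sketch (not part of the climpred source above): note
# the argument order, observations first and forecasts second, matching
# the wrapper's comment. Synthetic ensemble with a 'member' dimension.
import numpy as np
import xarray as xr
from xskillscore import crps_ensemble

verif = xr.DataArray(np.random.randn(50), dims='time')
forecast = xr.DataArray(np.random.randn(10, 50), dims=('member', 'time'))
crps = crps_ensemble(verif, forecast)  # 0.0 is perfect; lower is better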


def _crps_gaussian(forecast, mu, sig, **metric_kwargs):
    """Computes the CRPS of verification data ``o`` relative to normally distributed
    forecasts with mean ``mu`` and standard deviation ``sig``.
github bradyrx / climpred / climpred / metrics.py
* climpred.spearman_r_eff_p_value

    Reference:
        * Bretherton, Christopher S., et al. "The effective number of spatial degrees of
          freedom of a time-varying field." Journal of Climate 12.7 (1999): 1990-2009.
    """
    skipna = metric_kwargs.get('skipna', False)
    # The p value computation emits RuntimeWarnings when working with NaNs,
    # such as on a climate model grid. We suppress those warnings here to
    # avoid the noisy output.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        return pearson_r_eff_p_value(forecast, verif, dim=dim, skipna=skipna)


__pearson_r_eff_p_value = Metric(
    name='pearson_r_eff_p_value',
    function=_pearson_r_eff_p_value,
    positive=False,
    probabilistic=False,
    unit_power=0.0,
    long_name=(
        "Pearson's Anomaly correlation coefficient "
        'p value using the effective sample size'
    ),
    aliases=['p_pval_eff', 'pvalue_eff', 'pval_eff'],
    minimum=0.0,
    maximum=1.0,
    perfect=0.0,
)
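
The wrapped `pearson_r_eff_p_value` presumably comes from xskillscore. A hedged sketch calling it directly on synthetic autocorrelated series, where the reduced effective sample size makes the effective p value less significant than the naive one:

import numpy as np
import xarray as xr
from xskillscore import pearson_r_eff_p_value, pearson_r_p_value

# Random walks are strongly autocorrelated, so the effective sample size
# is far below the raw length of 200 (synthetic data).
a = xr.DataArray(np.cumsum(np.random.randn(200)), dims='time')
b = a + np.random.randn(200)
p_naive = pearson_r_p_value(a, b, dim='time')
p_eff = pearson_r_eff_p_value(a, b, dim='time')  # typically >= p_naive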
github bradyrx / climpred / climpred / metrics.py
        +-----------------+-----------+
        | **maximum**     | ∞         |
        +-----------------+-----------+
        | **perfect**     | 0.0       |
        +-----------------+-----------+
        | **orientation** | negative  |
        +-----------------+-----------+

    Reference:
        * https://www.cawcr.gov.au/projects/verification/
        * https://www-miklip.dkrz.de/about/murcss/
    """
    bias = (forecast - verif).mean(dim)
    return bias


__unconditional_bias = Metric(
    name='unconditional_bias',
    function=_unconditional_bias,
    positive=False,
    probabilistic=False,
    unit_power=1,
    long_name='Unconditional bias',
    aliases=['u_b', 'bias'],
    minimum=-np.inf,
    maximum=np.inf,
    perfect=0.0,
)
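
# Hedged sketch (not part of the climpred source above): the unconditional
# bias is just the mean difference, so a forecast offset by +0.5 everywhere
# has a bias of exactly 0.5. Synthetic data for illustration.
import numpy as np
import xarray as xr

verif = xr.DataArray(np.random.randn(100), dims='time')
forecast = verif + 0.5
bias = (forecast - verif).mean('time')  # -> 0.5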


def _conditional_bias(forecast, verif, dim=None, **metric_kwargs):
    """Conditional bias between forecast and verification data.
github bradyrx / climpred / climpred / metrics.py
* climpred.pearson_r_eff_p_value

    Reference:
        * Bretherton, Christopher S., et al. "The effective number of spatial degrees of
          freedom of a time-varying field." Journal of Climate 12.7 (1999): 1990-2009.
    """
    skipna = metric_kwargs.get('skipna', False)
    # The p value computation emits RuntimeWarnings when working with NaNs,
    # such as on a climate model grid. We suppress those warnings here to
    # avoid the noisy output.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        return spearman_r_eff_p_value(forecast, verif, dim=dim, skipna=skipna)


__spearman_r_eff_p_value = Metric(
    name='spearman_r_eff_p_value',
    function=_spearman_r_eff_p_value,
    positive=False,
    probabilistic=False,
    unit_power=0.0,
    long_name=(
        "Spearman's Rank correlation coefficient "
        'p value using the effective sample size'
    ),
    aliases=['s_pval_eff', 'spvalue_eff', 'spval_eff'],
    minimum=0.0,
    maximum=1.0,
    perfect=0.0,
)