How to use the pytesmo.metrics.rmsd function in pytesmo

To help you get started, we’ve selected a few pytesmo examples based on popular ways it is used in public projects.
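Before diving into the snippets below, here is a minimal, self-contained sketch of calling pytesmo.metrics.rmsd directly on two NumPy arrays; the array values are illustrative only.

import numpy as np
from pytesmo import metrics

# two illustrative series with a constant offset of 2
x = np.arange(10)
y = np.arange(10) + 2

# root-mean-square deviation between the two series
print(metrics.rmsd(x, y))  # 2.0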


github TUW-GEO / pytesmo / tests / test_validation_framwork / test_metric_calculators.py View on Github
with warnings.catch_warnings():
    warnings.simplefilter("ignore")  # many warnings due to test data

    metriccalc = RollingMetrics(other_name='k1')
    dataset = metriccalc.calc_metrics(data, gpi_info=(0, 0, 0), center=False)

# test pearson r
ref_array = df['ref'].rolling('30d').corr(df['k1'])
np.testing.assert_almost_equal(dataset['R'][0], ref_array.values)

# test rmsd
indexer = np.arange(30)[None, :] + np.arange(len(df)-30)[:, None]
rmsd_arr = []
for i in range(indexer.shape[0]):
    rmsd_arr.append(metrics.rmsd(df['ref'][indexer[i, :]],
                                 df['k1'][indexer[i, :]]))

rmsd_arr = np.array(rmsd_arr)
np.testing.assert_almost_equal(dataset['RMSD'][0][29:-1], rmsd_arr)
github TUW-GEO / pytesmo / tests / test_metrics.py View on Github
def test_ubrmsd():
    """
    Test for ubrmsd
    """
    # example 1
    x = np.arange(10)
    y = np.arange(10) + 2

    ubrmsd_pred = 0
    ubrmsd_obs = met.ubrmsd(x, y)

    nptest.assert_equal(ubrmsd_obs, ubrmsd_pred)
    # also check consistency with direct formula
    ubrmsd_direct = np.sqrt(met.rmsd(x, y) ** 2 - met.bias(x, y)**2)
    nptest.assert_equal(ubrmsd_obs, ubrmsd_direct)

    # example 2, with outlier
    x = np.arange(10)
    y = np.arange(10) + 2
    y[-1] = 100.

    ubrmsd_pred = 26.7
    ubrmsd_obs = met.ubrmsd(x, y)

    nptest.assert_almost_equal(ubrmsd_obs, ubrmsd_pred, 6)
    # also check consistency with direct formula
    ubrmsd_direct = np.sqrt(met.rmsd(x, y) ** 2 - met.bias(x, y)**2)
    nptest.assert_almost_equal(ubrmsd_obs, ubrmsd_direct)
github TUW-GEO / pytesmo / tests / test_metrics.py View on Github
def test_rmsd():
    """
    Test for rmsd
    """
    # example 1
    x = np.arange(10)
    y = np.arange(10) + 2

    rmsd_pred = 2.
    rmsd_obs = met.rmsd(x, y)

    nptest.assert_equal(rmsd_obs, rmsd_pred)

    # example 2, with outlier
    x = np.arange(10)
    y = np.arange(10) + 2
    y[-1] = 100.

    rmsd_pred = np.sqrt(831.7)
    rmsd_obs = met.rmsd(x, y)

    nptest.assert_almost_equal(rmsd_obs, rmsd_pred, 6)
github TUW-GEO / pytesmo / tests / test_metrics.py View on Github
def test_rmsd_mse():
    """
    Test for rmsd and mse
    """
    # example 1
    x = np.random.randn(1000)
    y = np.random.randn(1000)

    rmsd_pred = met.rmsd(x, y)
    mse_pred, _, _, _ = met.mse(x, y)

    nptest.assert_almost_equal(rmsd_pred ** 2, mse_pred, 6)
github TUW-GEO / pytesmo / src / pytesmo / validation_framework / metric_calculators.py View on Github
        Notes
        -----
        Kendall tau calculation is optional at the moment
        because the scipy implementation is very slow, which is
        problematic for global comparisons.

        """
        dataset = super(BasicMetrics, self).calc_metrics(data, gpi_info)

        if len(data) < 10:
            return dataset

        x, y = data['ref'].values, data[self.other_name].values
        R, p_R = metrics.pearsonr(x, y)
        rho, p_rho = metrics.spearmanr(x, y)
        RMSD = metrics.rmsd(x, y)
        BIAS = metrics.bias(x, y)

        dataset['R'][0], dataset['p_R'][0] = R, p_R
        dataset['rho'][0], dataset['p_rho'][0] = rho, p_rho
        dataset['RMSD'][0] = RMSD
        dataset['BIAS'][0] = BIAS
        dataset['n_obs'][0] = len(data)

        if self.calc_tau:
            tau, p_tau = metrics.kendalltau(x, y)
            dataset['tau'][0], dataset['p_tau'][0] = tau, p_tau

        return dataset
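As a hypothetical usage sketch (the constructor arguments other_name and calc_tau are inferred from the snippets on this page, not confirmed here), BasicMetrics can be driven directly on a small two-column DataFrame with random illustrative data:

import numpy as np
import pandas as pd
from pytesmo.validation_framework.metric_calculators import BasicMetrics

# illustrative reference and comparison series (random data)
index = pd.date_range("2020-01-01", periods=100, freq="D")
data = pd.DataFrame({"ref": np.random.randn(100),
                     "k1": np.random.randn(100)}, index=index)

# skip the (slow) Kendall tau calculation mentioned in the Notes above
metriccalc = BasicMetrics(other_name="k1", calc_tau=False)
dataset = metriccalc.calc_metrics(data, gpi_info=(0, 0, 0))
print(dataset["RMSD"][0], dataset["BIAS"][0])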
github TUW-GEO / pytesmo / src / pytesmo / df_metrics.py View on Github
def rmsd(df):
    """Root-mean-square deviation

    Returns
    -------
    result : namedtuple
        Element names are built from the column names of df
        for which the calculation was done, joined by '_and_'.

    See Also
    --------
    pytesmo.metrics.rmsd
    """
    return _dict_to_namedtuple(nwise_apply(df, metrics.rmsd, n=2, comm=True),
                               'rmsd')
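
A short usage sketch, assuming the pairwise fields are named by joining the DataFrame column names with '_and_' as the docstring above describes; the column names and random data are illustrative.

import numpy as np
import pandas as pd
from pytesmo import df_metrics

# illustrative DataFrame with three columns; rmsd is computed for each pair
df = pd.DataFrame({"ascat": np.random.randn(50),
                   "ismn": np.random.randn(50),
                   "era5": np.random.randn(50)})

result = df_metrics.rmsd(df)
print(result._fields)  # e.g. ('ascat_and_ismn', 'ascat_and_era5', 'ismn_and_era5')
print(result[0])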