How to use the verde.BlockMean class in verde

To help you get started, we’ve selected a few verde examples based on popular ways it is used in public projects.

github fatiando / verde / dev / _downloads / 8e59c4ed612021f145f74b62d5b36182 / weights.py (View on Github)
proj_coords = projection(data.longitude.values, data.latitude.values)

region = vd.get_region(coordinates)
spacing = 5 / 60

########################################################################################
# Now we can grid our data using a weighted spline. We'll use the block mean results
# with uncertainty-based weights.
#
# Note that the weighted spline solution only works for non-exact interpolation, so
# we'll need either some damping regularization or point forces that aren't placed at
# the data locations. Here, we'll apply a bit of damping.
spline = vd.Chain(
    [
        # Convert the spacing to meters because Spline is a Cartesian gridder
        ("mean", vd.BlockMean(spacing=spacing * 111e3, uncertainty=True)),
        ("spline", vd.Spline(damping=1e-10)),
    ]
).fit(proj_coords, data.velocity_up, data.weights)
grid = spline.grid(
    region=region,
    spacing=spacing,
    projection=projection,
    dims=["latitude", "longitude"],
    data_names=["velocity"],
)

########################################################################################
# Calculate an unweighted spline as well for comparison.
spline_unweighted = vd.Chain(
    [
        ("mean", vd.BlockReduce(np.mean, spacing=spacing * 111e3)),
github fatiando / verde / dev / _downloads / f808b7408e6a6dfc54a537fefabea4db / spline_weights.py (View on Github)
# We'll test this on the California vertical GPS velocity data because it comes with the
# uncertainties
data = vd.datasets.fetch_california_gps()
coordinates = (data.longitude.values, data.latitude.values)

# Use a Mercator projection for our Cartesian gridder
projection = pyproj.Proj(proj="merc", lat_ts=data.latitude.mean())

# Now we can chain a blocked weighted mean and a weighted spline together. We'll use
# uncertainty propagation to calculate the new weights from the block means because our
# data vary smoothly but have different uncertainties.
spacing = 5 / 60  # 5 arc-minutes
chain = vd.Chain(
    [
        ("mean", vd.BlockMean(spacing=spacing * 111e3, uncertainty=True)),
        ("spline", vd.Spline(damping=1e-10)),
    ]
)
print(chain)

# Split the data into a training and testing set. We'll use the training set to grid the
# data and the testing set to validate our spline model. Weights need to be
# 1/uncertainty**2 for the error propagation in BlockMean to work.
train, test = vd.train_test_split(
    projection(*coordinates),
    data.velocity_up,
    weights=1 / data.std_up ** 2,
    random_state=0,
)
# Fit the model on the training set
chain.fit(*train)
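# The excerpt ends after fitting. As a sketch (not part of the excerpt above), the
# held-out set could be used for validation with the gridder's ``score`` method,
# which returns an R² value:
score = chain.score(*test)
print("R² score on the testing set:", score)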
github fatiando / verde / tutorials / vectors.py (View on Github)
weights=(1 / data.std_east ** 2, 1 / data.std_north ** 2),
    random_state=1,
)

########################################################################################
# Now we can make a 2-component spline. Since :class:`verde.Vector` implements
# ``fit``, ``predict``, and ``filter``, we can use it in a :class:`verde.Chain` to build
# a pipeline.
#
# We need to use a bit of damping so that the weights can be taken into account. Splines
# without damping provide a perfect fit to the data and ignore the weights as a
# consequence.

chain = vd.Chain(
    [
        ("mean", vd.BlockMean(spacing=spacing * 111e3, uncertainty=True)),
        ("trend", vd.Vector([vd.Trend(1), vd.Trend(1)])),
        ("spline", vd.Vector([vd.Spline(damping=1e-10), vd.Spline(damping=1e-10)])),
    ]
)
print(chain)

########################################################################################
#
# .. warning::
#
#     Never generate the component gridders with ``[vd.Spline()]*2``. This will result
#     in each component being represented by **the same Spline object**, causing
#     problems when trying to fit it to different components.
#
# Fitting the spline and gridding is exactly the same as what we've done before.
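#
# A quick aside (an illustrative addition, not in the original tutorial): Python list
# multiplication repeats references to a single object, which is why ``[vd.Spline()]*2``
# would share one gridder between the components.

shared = [vd.Spline()] * 2
print(shared[0] is shared[1])  # True: both components would share one Spline

separate = [vd.Spline(damping=1e-10), vd.Spline(damping=1e-10)]
print(separate[0] is separate[1])  # False: each component gets its own gridder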
github fatiando / verde / examples / blockreduce_weights_mean.py (View on Github)
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
import verde as vd

# We'll test this on the California vertical GPS velocity data because it comes with the
# uncertainties
data = vd.datasets.fetch_california_gps()
coordinates = (data.longitude, data.latitude)

# We'll calculate the mean on large blocks to show the effect of the different weighting
# schemes
spacing = 30 / 60
# It's important that the weights are given as 1/sigma**2 for the uncertainty
# propagation. In this case, you should not use verde.variance_to_weights because it
# would normalize the weights.
weights = 1 / data.std_up ** 2
reducer = vd.BlockMean(spacing, center_coordinates=True)
# First produce the weighted variance weights
variance_weights = reducer.filter(coordinates, data.velocity_up, weights)[-1]
# And now produce the uncertainty propagation weights
reducer.set_params(uncertainty=True)
block_coords, velocity, uncertainty_weights = reducer.filter(
    coordinates, data.velocity_up, weights
)

# Now we can plot the different weights side by side on Mercator maps
fig, axes = plt.subplots(
    1, 3, figsize=(13.5, 7), subplot_kw=dict(projection=ccrs.Mercator())
)
crs = ccrs.PlateCarree()
titles = ["Variance weights", "Uncertainty weights"]
weight_estimates = [variance_weights, uncertainty_weights]
for ax, title, w in zip(axes[:2], titles, weight_estimates):
github fatiando / verde / dev / _downloads / 8e59c4ed612021f145f74b62d5b36182 / weights.py (View on Github)
#
#     w_i = \dfrac{1}{\sigma_i^2}
#     \: , \qquad
#     \sigma_{\bar{d}^*}^2 = \dfrac{1}{\sum\limits_{i=1}^N w_i}
#     \: , \qquad
#     w = \dfrac{1}{\sigma_{\bar{d}^*}^2}
#
# in which :math:`\sigma_i` are the input data uncertainties in the block and
# :math:`\sigma_{\bar{d}^*}` is the propagated uncertainty of the weighted mean in the
# block.
#
# Notice that in this case the output weights reflect the input data uncertainties. Less
# weight is given to the data points that had larger uncertainties from the start.
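#
# As a toy illustration of the propagation above (an addition, not part of the
# original example), the output weight for a single block can be computed directly:
import numpy as np

sigma = np.array([0.1, 0.3, 0.2])  # hypothetical uncertainties of points in one block
values = np.array([1.2, 0.8, 1.0])  # hypothetical data values of those points
w_in = 1 / sigma ** 2  # input weights w_i
block_mean = np.sum(w_in * values) / np.sum(w_in)  # weighted mean of the block
variance_of_mean = 1 / np.sum(w_in)  # propagated variance of the weighted mean
w_out = 1 / variance_of_mean  # output weight, equal to the sum of the input weights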

# Configure BlockMean to assume that the input weights are 1/uncertainty**2
mean = vd.BlockMean(spacing=15 / 60, uncertainty=True)

coordinates, velocity, weights = mean.filter(
    coordinates=(data.longitude, data.latitude),
    data=data.velocity_up,
    weights=data.weights,
)

plot_data(
    coordinates,
    velocity,
    weights,
    "Weighted mean vertical GPS velocity",
    "Weights based on data uncertainty",
)

########################################################################################
github fatiando / verde / tutorials / vectors.py (View on Github)
# element of the tuple must be an array with the data values for a component of the
# vector data. As with ``coordinates``, **the order of components must be**
# ``(east_component, north_component, up_component)``.
#
#
# Blocked reductions
# ------------------
#
# Operations with :class:`verde.BlockReduce` and :class:`verde.BlockMean` can handle
# multi-component data automatically. The reduction operation is applied to each data
# component separately. The blocked data and weights will be returned in tuples as well
# following the same ordering as the inputs. This will work for an arbitrary number of
# components.

# Use a blocked mean with uncertainty type weights
reducer = vd.BlockMean(spacing=spacing * 111e3, uncertainty=True)
block_coords, block_data, block_weights = reducer.filter(
    coordinates=proj_coords,
    data=(data.velocity_east, data.velocity_north),
    weights=(1 / data.std_east ** 2, 1 / data.std_north ** 2),
)
print(len(block_data), len(block_weights))
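# As noted above, the outputs keep the same (east, north) ordering as the inputs, so
# they can be unpacked per component (an illustrative addition, not in the tutorial):
block_east, block_north = block_data
weights_east, weights_north = block_weights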

########################################################################################
# We can convert the blocked coordinates back to longitude and latitude to plot with
# Cartopy.

block_lon, block_lat = projection(*block_coords, inverse=True)

plt.figure(figsize=(6, 8))
ax = plt.axes(projection=ccrs.Mercator())
crs = ccrs.PlateCarree()