# `env` is assumed to be a rasterio.Env configured earlier, and `url` to point
# at a remote GeoTIFF.
with env:
    with rasterio.open(url) as src:
        with rasterio.vrt.WarpedVRT(src, crs="epsg:4326") as vrt:
            expected_shape = (vrt.width, vrt.height)
            expected_crs = vrt.crs
            expected_res = vrt.res
            # Value of single pixel in center of image; note that rasterio's
            # xy() takes (row, col), i.e. (height, width) order.
            lon, lat = vrt.xy(vrt.height // 2, vrt.width // 2)
            expected_val = next(vrt.sample([(lon, lat)]))
            # xr.open_rasterio is deprecated in recent xarray in favor of
            # rioxarray.open_rasterio.
            with xr.open_rasterio(vrt) as da:
                actual_shape = (da.sizes["x"], da.sizes["y"])
                actual_crs = da.crs
                actual_res = da.res
                actual_val = da.sel(dict(x=lon, y=lat), method="nearest").data

                assert_equal(actual_shape, expected_shape)
                assert_equal(actual_crs, expected_crs)
                assert_equal(actual_res, expected_res)
                assert_equal(expected_val, actual_val)
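The pixel-center round trip the test relies on can be checked without a network dataset; this sketch uses only an affine transform, and the grid and values are made up.

from affine import Affine
from rasterio.transform import xy, rowcol

transform = Affine(0.1, 0.0, -180.0, 0.0, -0.1, 90.0)  # 0.1-degree global grid
row, col = 360, 720                               # an arbitrary pixel
lon, lat = xy(transform, row, col)                # pixel-center coordinates
assert (row, col) == rowcol(transform, lon, lat)  # centers map back exactly
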
def load_mch(filename, timestep, **importer_kwargs):
    # `importer` is assumed to be resolved elsewhere (e.g. a pysteps reader);
    # it returns the rain field R, a quality field, and a metadata dict.
    R, quality, meta = importer(filename, **importer_kwargs)
    x1 = meta["x1"]
    y1 = meta["y1"]
    xsize = meta["xpixelsize"]
    ysize = meta["ypixelsize"]
    ds = xr.Dataset(
        {"precipitation": (["y", "x"], R[::-1, :])},
        coords={
            # Pixel-center coordinates: offset half a pixel from the origin.
            "x": (
                ["x"],
                np.arange(x1 + xsize // 2, x1 + xsize * R.shape[1], xsize),
            ),
            "y": (
                ["y"],
                np.arange(y1 + ysize // 2, y1 + ysize * R.shape[0], ysize),
            ),
            "time": (["time"], [timestep]),
        },
    )
    return ds
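A quick check of the coordinate arithmetic above, with made-up grid parameters: starting half a pixel in from x1 and stepping by the pixel size yields exactly one center per column.

import numpy as np

x1, xsize, ncols = 0, 1000, 3
x = np.arange(x1 + xsize // 2, x1 + xsize * ncols, xsize)
assert (x == np.array([500, 1500, 2500])).all()
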
root = [
    "projection",
    "x1",
    # ... (list truncated in the source)
]

# (the enclosing function and the condition guarding this early exit are
# truncated in the source)
# The file size is fine; open without an explicit chunking scheme.
return xr.open_mfdataset(paths, concat_dim=concat_dim, **kwargs)

# Chunking will pretty much 'always' be 2x2, very rarely 3x3 or 4x4. 5x5
# would imply an uncompressed single file of ~6GB! All expected grids
# should be divisible by 2, 3, and 4.
divisor = int(sqrt(n_chunks))  # math.sqrt; n_chunks is expected to be a perfect square
if not (n_lat % divisor == 0) or not (n_lon % divisor == 0):
    raise ValueError("Can't find a good chunking strategy for the given "
                     "data source. Are lat/lon coordinates divisible by "
                     "{}?".format(divisor))
chunks = {lat: n_lat // divisor, lon: n_lon // divisor}
return xr.open_mfdataset(paths, concat_dim=concat_dim, chunks=chunks, **kwargs)
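A worked instance of the divisor logic above, with assumed sizes: n_chunks=4 gives divisor 2, so a 360x720 grid splits cleanly into 180x360 chunks (string keys stand in for the `lat`/`lon` dimension-name variables in the snippet).

from math import sqrt

n_chunks, n_lat, n_lon = 4, 360, 720
divisor = int(sqrt(n_chunks))
assert n_lat % divisor == 0 and n_lon % divisor == 0
chunks = {"lat": n_lat // divisor, "lon": n_lon // divisor}
assert chunks == {"lat": 180, "lon": 360}
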
def psd_fft(varr):
    # `fftw` is assumed to be a numpy-compatible FFT module imported earlier
    # (e.g. pyfftw.interfaces.numpy_fft, or np.fft as a drop-in).
    _T = len(varr.coords['frame'])
    ns = _T // 2 + 1
    # Frequency coordinate of the rfft output, in cycles per frame.
    if _T % 2 == 0:
        freq_crd = np.linspace(0, 0.5, ns)
    else:
        freq_crd = np.linspace(0, 0.5 * (_T - 1) / _T, ns)
    print("computing psd of input")
    varr_fft = xr.apply_ufunc(
        fftw.rfft,
        varr.chunk(dict(frame=-1)),
        input_core_dims=[['frame']],
        output_core_dims=[['freq']],
        dask='allowed',
        output_sizes=dict(freq=ns),
        output_dtypes=[np.complex128])  # np.complex_ was removed in NumPy 2.0
    varr_fft = varr_fft.assign_coords(freq=freq_crd)
    varr_psd = 1 / _T * np.abs(varr_fft)**2
    return varr_psd
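The even/odd branch above mirrors numpy's rfft conventions; a quick check, with np.fft standing in for the fftw module:

import numpy as np

for T in (8, 9):  # even and odd frame counts
    ns = T // 2 + 1
    assert len(np.fft.rfft(np.zeros(T))) == ns
    if T % 2 == 0:
        np.testing.assert_allclose(np.fft.rfftfreq(T), np.linspace(0, 0.5, ns))
    else:
        np.testing.assert_allclose(np.fft.rfftfreq(T),
                                   np.linspace(0, 0.5 * (T - 1) / T, ns))
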
def update_feature_test(cutout, red):
    """atlite should be able to overwrite a feature."""
    red.data = cutout.data.drop_vars('influx_direct')
    red.prepare('influx', overwrite=True)
    assert_equal(red.data.influx_direct, cutout.data.influx_direct)
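The drop-then-restore pattern exercised above, reduced to plain xarray with toy data (in the test, atlite's prepare() is what actually recomputes the variable):

import numpy as np
import xarray as xr
from xarray.testing import assert_equal

full = xr.Dataset({"influx_direct": ("time", np.arange(3.0))})
reduced = full.drop_vars("influx_direct")
assert "influx_direct" not in reduced
restored = reduced.assign(influx_direct=full.influx_direct)
assert_equal(restored.influx_direct, full.influx_direct)
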
    # (opening of this xr.DataArray(...) call truncated in the source)
    dims=['markets', 'nsiminds', 'vars'],
    attrs={'Desc': 'random draws given for the estimation.'}
)
s_jt = ps2['s_jt'].reshape(-1, )  # s_jt for nmkts * nbrands
self.s_jt = xr.DataArray(
    s_jt.reshape((nmkts, nbrands)),
    coords=[range(nmkts), range(nbrands)],
    dims=['markets', 'brands'],
    attrs={'Desc': 'Market share of each brand.'}
)
self.ans = ps2['ans'].reshape(-1, )

Z = np.c_[Z_org[:, 1:], X1[:, 1:]]
self.Z = xr.DataArray(
    Z.reshape((self.nmkts, self.nbrands, -1)),
    coords=[range(nmkts), range(nbrands), range(Z.shape[-1])],
    dims=['markets', 'brands', 'vars'],
    attrs={'Desc': 'Instruments'}
)
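The reshape convention used twice above, on toy numbers: a flat (markets*brands, vars) matrix folds into a (markets, brands, vars) cube.

import numpy as np

nmkts, nbrands, nvars = 2, 3, 4
Z = np.arange(nmkts * nbrands * nvars).reshape(nmkts * nbrands, nvars)
cube = Z.reshape((nmkts, nbrands, -1))
assert cube.shape == (nmkts, nbrands, nvars)
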
def test_cusum_OLS(test_data, strucchange_cusum_OLS):
    """Tested against strucchange 1.5.1"""
    y = test_data.pop('y')
    X = test_data
    # Test sending pandas
    result = cu.cusum_OLS(X, y)
    assert np.allclose(result.score, strucchange_cusum_OLS[0])
    assert np.allclose(result.pvalue, strucchange_cusum_OLS[1])
    # And ndarray and xarray
    result = cu.cusum_OLS(X.values, xr.DataArray(y, dims=['time']))
    assert np.allclose(result.score, strucchange_cusum_OLS[0])
    assert np.allclose(result.pvalue, strucchange_cusum_OLS[1])
def test_new_med_std():
    stdndwi = NormalisedDifferenceStats('green', 'nir', 'ndwi', stats=['std'])
    arr = np.random.uniform(low=-1, high=1, size=(5, 100, 100))
    data_array_1 = xr.DataArray(arr, dims=('time', 'y', 'x'),
                                coords={'time': list(range(5))}, attrs={'crs': 'Fake CRS'})
    arr = np.random.uniform(low=-1, high=1, size=(5, 100, 100))
    data_array_2 = xr.DataArray(arr, dims=('time', 'y', 'x'),
                                coords={'time': list(range(5))}, attrs={'crs': 'Fake CRS'})
    dataset = xr.Dataset(data_vars={'green': data_array_1, 'nir': data_array_2}, attrs={'crs': 'Fake CRS'})
    result = stdndwi.compute(dataset)
    assert isinstance(result, xr.Dataset)
    assert 'ndwi_std' in result.data_vars
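What 'ndwi_std' presumably reduces to, assuming the usual normalised-difference definition (a hand-rolled check, not the actual NormalisedDifferenceStats implementation):

import numpy as np

green = np.random.uniform(-1, 1, (5, 4, 4))
nir = np.random.uniform(-1, 1, (5, 4, 4))
ndwi = (green - nir) / (green + nir)     # normalised difference per pixel
assert ndwi.std(axis=0).shape == (4, 4)  # std over the time axis
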
def create_test_dataset(cls, shape, chunks=None):
    # Presumably a classmethod on a test class; `cls` is unused in the body.
    size = int(np.prod(shape))
    dims = ["time", "y", "x"]
    a_data = np.linspace(0, 1, size, dtype=np.float64).reshape(shape)
    a = xr.DataArray(a_data, dims=dims)
    b_data = np.linspace(-1, 0, size, dtype=np.float64).reshape(shape)
    b = xr.DataArray(b_data, dims=dims)
    if chunks:
        # Record the intended on-disk chunking and chunk the in-memory data.
        a.encoding.update(chunks=chunks, chunksizes=chunks)
        b.encoding.update(chunks=chunks, chunksizes=chunks)
        return xr.Dataset(dict(a=a, b=b)).chunk(
            chunks={dims[i]: chunks[i] for i in range(len(dims))})
    else:
        return xr.Dataset(dict(a=a, b=b))
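Hypothetical usage of the helper above (passing None for the unused cls argument): a (2, 4, 4) dataset chunked (1, 2, 2) per dimension.

ds = create_test_dataset(None, shape=(2, 4, 4), chunks=(1, 2, 2))
assert ds.a.shape == (2, 4, 4)
assert ds.chunks["time"] == (1, 1)
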
def foo2_dataset(a, b):
    # Imports are kept local, presumably so the function can be serialized
    # and shipped to workers on its own.
    import numpy as np
    import xarray as xr
    x = np.tile(a + b, (2, 3))
    return xr.Dataset({'x': (['t1', 't2'], x)})
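Example call: scalar inputs broadcast through np.tile into a fixed 2x3 grid.

ds = foo2_dataset(1.0, 2.0)
assert ds['x'].shape == (2, 3)
assert float(ds['x'][0, 0]) == 3.0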