Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment of a clip test — the enclosing `def` is above this
# excerpt and indentation appears stripped in this paste.
# Test COO.clip against numpy's clip on a small dense reference array.
x = np.array([[0, 0, 1, 0, 2],
[5, 0, 0, 3, 0]])
s = sparse.COO.from_numpy(x)
# min-only, max-only, both bounds, and mixed int/float bounds must all
# match the dense result; np.clip on the sparse array must dispatch too.
assert_eq(s.clip(min=1), x.clip(min=1))
assert_eq(s.clip(max=3), x.clip(max=3))
assert_eq(s.clip(min=1, max=3), x.clip(min=1, max=3))
assert_eq(s.clip(min=1, max=3.0), x.clip(min=1, max=3.0))
assert_eq(np.clip(s, 1, 3), np.clip(x, 1, 3))
# clip() with neither bound given must raise.
with pytest.raises(ValueError):
s.clip()
# With out= supplied, the returned object is the out array itself and it
# holds the clipped values.
out = sparse.COO.from_numpy(np.zeros_like(x))
out2 = s.clip(min=1, max=3, out=out)
assert out is out2
assert_eq(out, x.clip(min=1, max=3))
def test_sparsearray_elemwise(format):
    """Element-wise addition of two random sparse arrays matches dense math.

    ``format`` is the sparse storage format to generate the operands in;
    the elemwise result is expected to come back as a COO array.
    """
    left = sparse.random((3, 4), density=0.5, format=format)
    right = sparse.random((3, 4), density=0.5, format=format)
    dense_left = left.todense()
    dense_right = right.todense()

    result = sparse.elemwise(operator.add, left, right)

    assert isinstance(result, COO)
    assert_eq(result, dense_left + dense_right)
def register_sparse():
    """Hook pydata/sparse COO arrays into the dispatch tables.

    Imports ``sparse`` lazily so the dependency stays optional until
    registration is actually requested.
    """
    import sparse

    tensordot_lookup.register(sparse.COO, sparse.tensordot)
    concatenate_lookup.register(sparse.COO, sparse.concatenate)
def tensor(data, dtype=None):
    """Coerce *data* to a ``sparse.COO`` tensor, optionally casting to *dtype*.

    Parameters
    ----------
    data : sparse array, numpy.ndarray, or array-like
        Input to convert. Sparse inputs are passed through (cast only when
        needed); dense inputs are converted to COO.
    dtype : numpy dtype, optional
        Target dtype. When ``None``, the input's own dtype is kept.

    Returns
    -------
    A sparse array with the requested (or original) dtype.
    """
    if is_sparse(data):
        # Already sparse: cast only when a different dtype was requested,
        # otherwise return the input unchanged (no copy).
        if dtype is not None and dtype != data.dtype:
            return data.astype(dtype)
        return data
    elif isinstance(data, np.ndarray):
        # Bug fix: the original called data.astype(dtype, copy=False)
        # unconditionally, and astype(None) coerces to float64, silently
        # changing integer inputs. Cast only when a dtype was given,
        # mirroring the sparse branch above.
        if dtype is not None:
            data = data.astype(dtype, copy=False)
        return sparse.COO.from_numpy(data)
    else:
        # np.array(..., dtype=None) already preserves the inferred dtype.
        return sparse.COO.from_numpy(np.array(data, dtype=dtype))
def register_sparse():
    """Register COO concatenate/tensordot implementations.

    The ``sparse`` package is imported inside the function so that merely
    defining this hook does not require the package to be installed.
    """
    import sparse

    for lookup, func in (
        (concatenate_lookup, sparse.concatenate),
        (tensordot_lookup, sparse.tensordot),
    ):
        lookup.register(sparse.COO, func)
# NOTE(review): fragment — `count`, `num_front`, `padding`,
# `relevant_front_items` and `relevant_back_items` are defined above this
# excerpt; indentation appears stripped in this paste.
num_back = count - num_front
# note that num_back is 0 <--> array.size is 0 or 1
# <--> relevant_back_items is []
# Join the leading items, the elision padding, and the trailing items
# into one display string.
pprint_str = (
" ".join(relevant_front_items[:num_front])
+ padding
+ " ".join(relevant_back_items[-num_back:])
)
return pprint_str
_KNOWN_TYPE_REPRS = {np.ndarray: "np.ndarray"}
with contextlib.suppress(ImportError):
import sparse
_KNOWN_TYPE_REPRS[sparse.COO] = "sparse.COO"
# NOTE(review): this function is truncated here — the excerpt ends inside
# the `else:` branch; indentation appears stripped in this paste.
def inline_dask_repr(array):
    """Similar to dask.array.DataArray.__repr__, but without
    redundant information that's already printed by the repr
    function of the xarray wrapper.
    """
assert isinstance(array, dask_array_type), array
# First chunk size along each dimension, used for the compact display.
chunksize = tuple(c[0] for c in array.chunks)
# Newer dask arrays carry a `_meta` whose concrete type (e.g. np.ndarray,
# sparse.COO) we can show via the _KNOWN_TYPE_REPRS lookup.
if hasattr(array, "_meta"):
meta = array._meta
if type(meta) in _KNOWN_TYPE_REPRS:
meta_repr = _KNOWN_TYPE_REPRS[type(meta)]
else:
>>> full((2, 2), 9, dtype=float).todense()  # doctest: +SKIP
array([[9., 9.],
       [9., 9.]])
"""
# NOTE(review): fragment — the `def full(...)` signature and the start of
# this docstring are above this excerpt; indentation appears stripped.
from sparse import COO
# Default the dtype to whatever numpy infers for the fill value.
if dtype is None:
dtype = np.array(fill_value).dtype
# Accept a bare integer shape as a 1-tuple.
if not isinstance(shape, tuple):
shape = (shape,)
if compressed_axes is not None:
check_compressed_axes(shape, compressed_axes)
# A "full" sparse array stores no explicit entries: empty data/coords plus
# the fill value represent every element.
data = np.empty(0, dtype=dtype)
coords = np.empty((len(shape), 0), dtype=np.intp)
return COO(
coords,
data=data,
shape=shape,
fill_value=fill_value,
has_duplicates=False,
sorted=True,
).asformat(format, compressed_axes=compressed_axes)
def _serialize_pydata_sparse(obj):
    """Serialize a pydata/sparse array for Arrow.

    Returns a ``(tag, tensor)`` pair where ``tag`` identifies the sparse
    format ('coo') and ``tensor`` is the corresponding Arrow sparse tensor.

    Raises
    ------
    NotImplementedError
        If *obj* is a sparse format other than COO.
    """
    if isinstance(obj, sparse.COO):
        return 'coo', pa.SparseCOOTensor.from_pydata_sparse(obj)
    else:
        # Bug fix: report the *actual* unsupported type. The original
        # formatted `sparse.COO` into the message — i.e. the one type that
        # IS supported — which made the error misleading.
        raise NotImplementedError(
            "Serialization of {} is not supported.".format(type(obj)))
# pip install sparse
import sparse
# find the bounding box of both arrays
extrema = np.array([a.min(axis=0),
a.max(axis=0),
b.min(axis=0),
b.max(axis=0)])
origin = extrema.min(axis=0) - 1
size = tuple(extrema.ptp(axis=0) + 2)
# put nearby voxel arrays into same shape sparse array
sp_a = sparse.COO((a - origin).T,
data=np.ones(len(a), dtype=np.bool),
shape=size)
sp_b = sparse.COO((b - origin).T,
data=np.ones(len(b), dtype=np.bool),
shape=size)
# apply the logical operation
# get a sparse matrix out
applied = operation(sp_a, sp_b)
# reconstruct the original coordinates
coords = np.column_stack(applied.coords) + origin
return coords