# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_export_concat(self):
    """Concatenate two float32 columns into a DatasetConcatenated and export it to HDF5."""
    x1 = np.arange(1000, dtype=np.float32)
    x2 = np.arange(100, dtype=np.float32)
    # Keep the expected concatenated values for later comparison by other tests.
    self.x_concat = np.concatenate((x1, x2))
    dataset1 = vx.dataset.DatasetArrays("dataset1")
    dataset2 = vx.dataset.DatasetArrays("dataset2")
    dataset1.add_column("x", x1)
    dataset2.add_column("x", x2)
    self.dataset_concat = vx.dataset.DatasetConcatenated([dataset1, dataset2], name="dataset_concat")
    # tempfile.mktemp() is deprecated and race-prone (the returned name can be
    # claimed by another process before use); mkstemp() creates the file atomically.
    import os
    handle, path_hdf5 = tempfile.mkstemp(".hdf5")
    os.close(handle)  # export_hdf5 reopens the path itself
    self.dataset_concat.export_hdf5(path_hdf5)
def test_virtual_columns_spherical(self):
# Round-trips a single point through spherical -> cartesian -> spherical
# virtual columns, then exercises DatasetConcatenated shape/flattening checks.
# NOTE(review): indentation appears to have been stripped from this chunk; the
# concat code from "ar1 = np.zeros((10, 2))" onward reads like a separate test
# body fused onto this one -- confirm against the original file.
alpha = np.array([0.])
delta = np.array([0.])
distance = np.array([1.])
dataset = vx.dataset.DatasetArrays()
dataset.add_column("alpha", alpha)
dataset.add_column("delta", delta)
dataset.add_column("distance", distance)
# (alpha, delta, distance) = (0, 0, 1) in degrees should land on the x-axis.
dataset.add_virtual_columns_spherical_to_cartesian("alpha", "delta", "distance", "x", "y", "z", radians=False)
subspace = dataset("x", "y", "z")
x, y, z = subspace.sum()
# Only one row, so each column sum equals that single point's coordinate.
self.assertAlmostEqual(x, 1)
self.assertAlmostEqual(y, 0)
self.assertAlmostEqual(z, 0)
dataset.add_virtual_columns_cartesian_to_spherical("x", "y", "z", "theta", "phi", "r", radians=False)
theta, phi, r = dataset("theta", "phi", "r").row(0)
# NOTE(review): theta/phi/r are never asserted on -- the inverse transform is
# only evaluated, not checked.
# Mixing a 2-d column with a 1-d column must be rejected by the constructor.
ar1 = np.zeros((10, 2))
ar2 = np.zeros((20))
arrays = [ar1, ar2]
N = len(arrays)
datasets = [vx.dataset.DatasetArrays("dataset1") for i in range(N)]
for dataset, array in zip(datasets, arrays):
dataset.add_column("x", array)
with self.assertRaises(ValueError):
dataset_concat = vx.dataset.DatasetConcatenated(datasets, name="dataset_concat")
# Same-rank 1-d columns of different lengths concatenate successfully.
ar1 = np.zeros((10))
ar2 = np.zeros((20))
arrays = [ar1, ar2]
N = len(arrays)
datasets = [vx.dataset.DatasetArrays("dataset1") for i in range(N)]
for dataset, array in zip(datasets, arrays):
dataset.add_column("x", array)
dataset_concat = vx.dataset.DatasetConcatenated(datasets, name="dataset_concat")
# concat() of already-concatenated datasets should flatten, not nest:
# 2 + 2 -> 4 members, 2 + 1 -> 3, 1 + 1 -> 2.
dataset_concat1 = vx.dataset.DatasetConcatenated(datasets, name="dataset_concat")
dataset_concat2 = vx.dataset.DatasetConcatenated(datasets, name="dataset_concat")
self.assertEqual(len(dataset_concat1.concat(dataset_concat2).datasets), 4)
self.assertEqual(len(dataset_concat1.concat(datasets[0]).datasets), 3)
self.assertEqual(len(datasets[0].concat(dataset_concat1).datasets), 3)
self.assertEqual(len(datasets[0].concat(datasets[0]).datasets), 2)
def setUp(self):
    """Build the small x/y fixture dataset (with a virtual column) and start the Vaex app."""
    self.dataset = vaex.dataset.DatasetArrays("dataset")
    # x = 0..9 and y = x**2, kept on self so individual tests can compare against them.
    self.x = np.arange(10)
    self.y = self.x ** 2
    self.dataset.add_column("x", self.x)
    self.dataset.add_column("y", self.y)
    # Virtual column z = x + t*y, with variable t bound to 1.
    self.dataset.set_variable("t", 1.)
    self.dataset.add_virtual_column("z", "x+t*y")
    self.app = vx.ui.main.VaexApp()
def create_base_ds():
# Builds a reference dataset: a strided big-endian float column x, y = x**2,
# and an int64 column with values near the +/-2**62 boundaries to stress
# exact integer handling.
# NOTE(review): this fragment looks truncated -- `ints` and `m` are prepared
# but never added as columns, and no dataset is returned in the visible code.
dataset = vaex.dataset.DatasetArrays("dataset")
# One column of a transposed copy: yields 21 big-endian ">f8" values from -2 upward.
x = np.arange(-2, 40, dtype=">f8").reshape((-1,21)).T.copy()[:,0]
# NOTE(review): "y = y =" is a redundant double assignment (harmless but likely a typo).
y = y = x ** 2
ints = np.arange(-2,19, dtype="i8")
# Extreme int64 values just past +/-2**62, placed at the start and again 10 slots later.
ints[0] = 2**62+1
ints[1] = -2**62+1
ints[2] = -2**62-1
ints[0+10] = 2**62+1
ints[1+10] = -2**62+1
ints[2+10] = -2**62-1
dataset.add_column("x", x)
dataset.add_column("y", y)
# m = x.copy()
# Same construction as x; sentinel values presumably masked later --
# TODO confirm: the mask setup is not visible in this chunk.
m = np.arange(-2, 40, dtype=">f8").reshape((-1,21)).T.copy()[:,0]
ma_value = 77777
m[-1+10] = ma_value
m[-1+20] = ma_value
# NOTE(review): the stanza below appears twice, verbatim -- it reads like a
# setUp-style fixture fragment (it uses `self` with no enclosing def visible
# in this chunk) that was duplicated during editing; confirm against the
# original file before removing either copy.
#self.jobsManager = dataset.JobsManager()
# Two-point dataset: x = [0, 1], y = [-1, 1].
x = np.array([0., 1])
y = np.array([-1., 1])
self.datasetxy = vx.dataset.DatasetArrays("datasetxy")
self.datasetxy.add_column("x", x)
self.datasetxy.add_column("y", y)
# Three datasets of lengths 2, 3, 1 concatenated into one of length 6;
# dataset3 additionally carries a y column the others lack.
x1 = np.array([1., 3])
x2 = np.array([2., 3, 4,])
x3 = np.array([5.])
self.x_concat = np.concatenate((x1, x2, x3))
dataset1 = vx.dataset.DatasetArrays("dataset1")
dataset2 = vx.dataset.DatasetArrays("dataset2")
dataset3 = vx.dataset.DatasetArrays("dataset3")
dataset1.add_column("x", x1)
dataset2.add_column("x", x2)
dataset3.add_column("x", x3)
dataset3.add_column("y", x3**2)
self.dataset_concat = vx.dataset.DatasetConcatenated([dataset1, dataset2, dataset3], name="dataset_concat")
# Concatenation of the same dataset three times over.
self.dataset_concat_dup = vx.dataset.DatasetConcatenated([self.dataset, self.dataset, self.dataset], name="dataset_concat_dup")
# --- duplicate of the stanza above ---
#self.jobsManager = dataset.JobsManager()
x = np.array([0., 1])
y = np.array([-1., 1])
self.datasetxy = vx.dataset.DatasetArrays("datasetxy")
self.datasetxy.add_column("x", x)
self.datasetxy.add_column("y", y)
x1 = np.array([1., 3])
x2 = np.array([2., 3, 4,])
x3 = np.array([5.])
self.x_concat = np.concatenate((x1, x2, x3))
dataset1 = vx.dataset.DatasetArrays("dataset1")
dataset2 = vx.dataset.DatasetArrays("dataset2")
dataset3 = vx.dataset.DatasetArrays("dataset3")
dataset1.add_column("x", x1)
dataset2.add_column("x", x2)
dataset3.add_column("x", x3)
dataset3.add_column("y", x3**2)
self.dataset_concat = vx.dataset.DatasetConcatenated([dataset1, dataset2, dataset3], name="dataset_concat")
self.dataset_concat_dup = vx.dataset.DatasetConcatenated([self.dataset, self.dataset, self.dataset], name="dataset_concat_dup")
# Fragment of a SoneiraPeebles-style constructor: fill one array per dimension
# via vaex.vaexfast.soneira_peebles, then expose the arrays as columns named
# x, y, z, w, v, u (one per dimension).
# NOTE(review): `dimension`, `array`, `L`, `eta`, `max_level`, `N`, `reorder`
# and `self` all come from an enclosing scope not visible in this chunk.
for d in range(dimension):
vaex.vaexfast.soneira_peebles(array[d], 0, 1, L[d], eta, max_level)
for d, name in zip(list(range(dimension)), "x y z w v u".split()):
self.add_column(name, array[d])
# Dead code: optional shuffling of the columns, permanently disabled by "if 0".
if 0:
order = np.zeros(N, dtype=np.int64)
vaex.vaexfast.shuffled_sequence(order);
for i, name in zip(list(range(dimension)), "x y z w v u".split()):
#np.take(array[i], order, out=array[i])
reorder(array[i], array[-1], order)
self.addColumn(name, array=array[i])
# Register the dataset class under its public type name.
dataset_type_map["soneira-peebles"] = SoneiraPeebles
# Dataset of a Gaussian random field shaped by a power-law spectrum, for the
# Zeldovich approximation.
# NOTE(review): the class body appears truncated in this chunk -- `grf` is
# computed but the visible code ends before it is used or any columns are added.
class Zeldovich(DatasetArrays):
def __init__(self, dim=2, N=256, n=-2.5, t=None, seed=None, scale=1, name="zeldovich approximation"):
super(Zeldovich, self).__init__(name=name)
# Optional deterministic seeding for reproducible random fields.
if seed is not None:
np.random.seed(seed)
#sys.exit(0)
# dim-dimensional grid of N samples per axis.
shape = (N,) * dim
# White-noise field, transformed to Fourier space.
A = np.random.normal(0.0, 1.0, shape)
F = np.fft.fftn(A)
# Wavenumber magnitude squared on the grid (frequencies scaled to radians).
K = np.fft.fftfreq(N, 1./(2*np.pi))[np.indices(shape)]
k = (K**2).sum(axis=0)
k_max = np.pi
# Shape the spectrum: amplitude ~ sqrt(k**n) with a Gaussian cutoff; modes
# beyond k_max are zeroed.
# NOTE(review): k contains 0 and n is negative, so k**n produces a divide
# warning for the zero mode; F.flat[0] = 0 below discards that mode anyway.
F *= np.where(np.sqrt(k) > k_max, 0, np.sqrt(k**n) * np.exp(-k*4.0))
F.flat[0] = 0
#pylab.imshow(np.where(sqrt(k) > k_max, 0, np.sqrt(k**-2)), interpolation='nearest')
# Real-space Gaussian random field.
grf = np.fft.ifftn(F).real
# Fragment of a table-import loop (astropy table -> dataset columns).
# NOTE(review): `type`, `masked_array`, `clean_name` and `name` come from an
# enclosing loop not visible in this chunk; `type` shadows the builtin.
if type.kind in ["i"]:
# Integer columns: zero out masked entries in place before adding the raw data.
masked_array.data[masked_array.mask] = 0
self.add_column(clean_name, self.table[name].data)
if type.kind in ["SU"]:
# Bytes/unicode string columns are added as-is.
self.add_column(clean_name, self.table[name].data)
#dataset.samp_id = table_id
#self.list.addDataset(dataset)
#return dataset
def read_table(self):
# Reads self.filename into an astropy Table using the stored format.
# NOTE(review): `kwargs` is not defined in this scope -- this almost certainly
# should be **self.kwargs; confirm against the full class before changing.
self.table = astropy.table.Table.read(self.filename, format=self.format, **kwargs)
import astropy.io.votable
import string
# Dataset backed by a VOTable file: parses the file, takes the first table,
# and exposes its fields as columns (recording UCD metadata when present).
# NOTE(review): the class continues past the end of this chunk -- the field
# loop below is truncated here.
class VOTable(DatasetArrays):
def __init__(self, filename):
DatasetArrays.__init__(self, filename)
self.filename = filename
self.path = filename
votable = astropy.io.votable.parse(self.filename)
self.first_table = votable.get_first_table()
self.description = self.first_table.description
for field in self.first_table.fields:
name = field.name
data = self.first_table.array[name]
# Column dtype; NOTE(review): the local name shadows the builtin `type`.
type = self.first_table.array[name].dtype
# Sanitize the field name into a valid, unused Python identifier.
clean_name = _python_save_name(name, self.columns.keys())
if field.ucd:
self.ucds[clean_name] = field.ucd