How to use the numpy.concatenate function

To help you get started, we’ve selected a few numpy.concatenate examples, based on popular ways it is used in public projects.

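np.concatenate joins a sequence of arrays along an existing axis; the arrays must match in shape on every other axis. A minimal sketch (the arrays here are made up for illustration):

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])

rows = np.concatenate([a, b], axis=0)    # shape (3, 2): b appended as a new row
cols = np.concatenate([a, b.T], axis=1)  # shape (2, 3): b.T has shape (2, 1)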

github MIC-DKFZ / RegRCNN / unittests.py
        cf.batch_size = 3
        cf.fold = 0
        cf.plot_dir = cf.exp_dir / "plots"
        logger = utils.get_logger(cf.exp_dir, cf.server_env, cf.sysmetrics_interval)
        cf.num_val_batches = "all"
        cf.val_mode = "val_sampling"
        cf.n_workers = 8
        batch_gens = dloader.get_train_generators(cf, logger, data_statistics=False)
        val_loader = batch_gens["val_sampling"]

        for epoch in range(4):
            produced_ids = []
            for i in range(batch_gens['n_val']):
                batch = next(val_loader)
                produced_ids.append(batch["pid"])
            uni, cts = np.unique(np.concatenate(produced_ids), return_counts=True)
            assert np.all(cts < 3), "each pid should occur at most twice.\n uni {}, cts {}".format(
                uni[cts>2], cts[cts>2])
            #assert len(np.setdiff1d(val_loader.generator.dataset_pids, uni))==0, "not all val pids were shown."
            assert len(np.setdiff1d(uni, val_loader.generator.dataset_pids))==0, "pids shown that are not val set. impossible?"

        cf.n_workers = os.cpu_count()
        cf.batch_size = int(val_loader.generator.dataset_length / cf.n_workers) + 2
        val_loader = dloader.create_data_gen_pipeline(cf, val_loader.generator._data, do_aug=False, sample_pids_w_replace=False,
                                                             max_batches=None, raise_stop_iteration=True)
        for epoch in range(2):
            produced_ids = []
            for b, batch in enumerate(val_loader):
                produced_ids.append(batch["pid"])
            uni, cts = np.unique(np.concatenate(produced_ids), return_counts=True)
            assert np.all(cts == 1), "each pid should occur exactly once.\n uni {}, cts {}".format(
                uni[cts>1], cts[cts>1])
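The reusable idea in this test is duplicate-counting across many small arrays: collect the per-batch pid arrays in a list, concatenate once, and let np.unique tally occurrences. A self-contained sketch with made-up pids:

import numpy as np

produced_ids = [np.array(['p1', 'p2']), np.array(['p2', 'p3'])]
uni, cts = np.unique(np.concatenate(produced_ids), return_counts=True)
# uni -> ['p1' 'p2' 'p3'], cts -> [1 2 1]
assert np.all(cts <= 2)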
github dmlc / dgl / python / dgl / data / sbm.py
    def collate_fn(self, x):
        g, lg, deg_g, deg_lg, pm_pd = zip(*x)
        g_batch = batch(g)
        lg_batch = batch(lg)
        degg_batch = np.concatenate(deg_g, axis=0)
        deglg_batch = np.concatenate(deg_lg, axis=0)
        pm_pd_batch = np.concatenate([x + i * self._n_nodes for i, x in enumerate(pm_pd)], axis=0)
        return g_batch, lg_batch, degg_batch, deglg_batch, pm_pd_batch
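collate_fn transposes a list of per-sample tuples with zip(*x) and concatenates each field along axis 0; pm_pd additionally gets a per-sample offset of i * self._n_nodes so node indices stay unique after batching. A standalone sketch of that offset trick (n_nodes and the index arrays are invented):

import numpy as np

n_nodes = 4
pm_pd = [np.array([0, 1]), np.array([0, 2])]  # per-graph node indices
batched = np.concatenate([x + i * n_nodes for i, x in enumerate(pm_pd)], axis=0)
# batched -> [0 1 4 6]: the second graph's indices are shifted past the first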
github p-christ / Deep-Reinforcement-Learning-Algorithms-with-PyTorch / environments / ant_environments / maze_env.py
  def _get_obs(self):
    wrapped_obs = self.wrapped_env._get_obs()
    if self._top_down_view:
      view = [self.get_top_down_view().flat]
    else:
      view = []

    if self._observe_blocks:
      additional_obs = []
      for block_name, block_type in self.movable_blocks:
        additional_obs.append(self.wrapped_env.get_body_com(block_name))
      wrapped_obs = np.concatenate([wrapped_obs[:3]] + additional_obs +
                                   [wrapped_obs[3:]])

    range_sensor_obs = self.get_range_sensor_obs()
    return np.concatenate([wrapped_obs,
                           range_sensor_obs.flat] +
                           view + [[self.t * 0.001]])
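np.concatenate converts every element of its input sequence to an array, which is why this code can mix a 1-D array, a .flat iterator, and a plain one-element list like [self.t * 0.001] in a single call. A reduced sketch with invented shapes:

import numpy as np

wrapped_obs = np.arange(5.0)
range_sensor_obs = np.ones((2, 3))
t = 42

obs = np.concatenate([wrapped_obs, range_sensor_obs.flat] + [[t * 0.001]])
# obs.shape -> (12,): 5 state values, 6 flattened sensor readings, 1 time feature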
github richardruancw / StarcraftElite / models / pg.py
	def calculate_advantage_TD(self, paths):
		all_advs = []
		all_returns = []
		for path in paths:
			rewards = path["reward"]
			observations = path["observation"]
			baseline_val = self.sess.run(
				[self.baseline], feed_dict={self.observation_placeholder: observations})[0]
			returns = rewards[:-1] + self.config.gamma * baseline_val[1:]
			adv = returns.copy()  # copy: the in-place subtraction below must not mutate returns
			if self.config.use_baseline:
				adv -= baseline_val[:-1]
			all_advs.append(adv)
			all_returns.append(returns)
		adv = np.concatenate(all_advs)
		returns = np.concatenate(all_returns)
		if self.config.normalize_advantage:
			adv -= np.mean(adv)
			adv /= np.std(adv)
		return adv, returns
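Concatenating the per-episode advantage arrays before normalizing matters: the mean and standard deviation are then computed over all timesteps at once rather than per episode. A toy sketch with made-up values:

import numpy as np

all_advs = [np.array([1.0, 2.0]), np.array([3.0])]
adv = np.concatenate(all_advs)
adv = (adv - adv.mean()) / adv.std()
# adv -> approximately [-1.22, 0.0, 1.22]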
github pypr / pysph / pysph / examples / rigid_body / three_spheres_in_fluid.py
def properties_of_three_spheres():
    x_cube, y_cube = create_sphere()

    b_id1 = np.ones_like(x_cube, dtype=int) * 0
    rho_1 = np.ones_like(x_cube, dtype=int) * 2000
    b_id2 = np.ones_like(x_cube, dtype=int) * 1
    rho_2 = np.ones_like(x_cube, dtype=int) * 1000
    b_id3 = np.ones_like(x_cube, dtype=int) * 2
    rho_3 = np.ones_like(x_cube, dtype=int) * 500

    b_id = np.concatenate([b_id1, b_id2, b_id3])
    rho = np.concatenate([rho_1, rho_2, rho_3])
    return b_id, rho
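Building a per-particle label with np.ones_like(...) * k and concatenating the pieces is a compact way to tag which body each particle belongs to. A minimal sketch with three invented particles per body:

import numpy as np

x = np.zeros(3)  # stand-in for one sphere's particle coordinates
b_id = np.concatenate([np.ones_like(x, dtype=int) * k for k in range(3)])
# b_id -> [0 0 0 1 1 1 2 2 2]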
github compas-dev / compas / src / compas / numerical / algorithms / dr_6dof_updated.py
        T0[i * 3:(i + 1) * 3, 0:3] = array([x0, y0, z0]).transpose()

        Ju = array([], dtype=int)
        Iu = where(array(fdof_node) == ui)[0]
        if len(Iu):
            Ju = (array(fdof)[Iu] - ui) * 100 - 1
            Ju = array([int(round(j)) for j in Ju], dtype=int64)

        Jv = array([], dtype=int)
        Iv = where(array(fdof_node) == vi)[0]
        if len(Iv):
            Jv = (array(fdof)[Iv] - vi) * 100 + 5
            Jv = array([int(round(j)) for j in Jv], dtype=int64)

        Je = concatenate((Ju, Jv), 0)
        Ie = concatenate((Iu, Iv), 0)
        de += len(Je)
        I[De[i]:de] = Ie
        J[De[i]:de] = Je
        De[i + 1] = de

        for j in range(len(Je)):
            for k in range(len(Je)):
                row.append(Ie[j])
                col.append(Ie[k])

        E  = edge.get('E', 0)
        A  = edge.get('A', 0)
        nu = edge.get('nu', 0)
        Ix = edge.get('Ix', 0)
        Iy = edge.get('Iy', 0)
        Iz = edge.get('Iz', 0)
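Note that concatenate happily accepts empty operands, which is what makes the Ju/Jv bookkeeping above work when a node contributes no free degrees of freedom. A reduced sketch using numpy's names imported directly, as this file does (the index values are invented):

from numpy import array, concatenate, int64

Ju = array([], dtype=int64)     # no dofs found for the u node
Jv = array([3, 4, 5], dtype=int64)
Je = concatenate((Ju, Jv), 0)   # -> [3 4 5], dtype int64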
github ddrrrr / projectRUL / attention2.py
        fea_dict['kur'] = fea_dict['kur'][:,:,np.newaxis]
        fea_dict['skew'] = np.sum((data-fea_dict['mean'].repeat(data.shape[2],axis=2))**3,axis=2) \
                / (np.var(data,axis=2)**(3/2)*data.shape[2])
        fea_dict['skew'] = fea_dict['skew'][:,:,np.newaxis]
        fea_dict['p2p'] = np.max(data,axis=2,keepdims=True) - np.min(data,axis=2,keepdims=True)
        fea_dict['var'] = np.var(data,axis=2,keepdims=True)
        fea_dict['cre'] = np.max(abs(data),axis=2,keepdims=True) / fea_dict['rms']
        fea_dict['imp'] = np.max(abs(data),axis=2,keepdims=True) \
                / np.mean(abs(data),axis=2,keepdims=True)
        fea_dict['mar'] = np.max(abs(data),axis=2,keepdims=True) \
                / (np.mean((abs(data))**0.5,axis=2,keepdims=True))**2
        fea_dict['sha'] = fea_dict['rms'] / np.mean(abs(data),axis=2,keepdims=True)
        fea_dict['smr'] = (np.mean((abs(data))**0.5,axis=2,keepdims=True))**2
        fea_dict['cle'] = fea_dict['p2p'] / fea_dict['smr']

        fea = np.concatenate(tuple(x for x in fea_dict.values()),axis=2)
        fea = fea.reshape(-1,fea.shape[1]*fea.shape[2])
        # self.feature_size = fea.shape[1]
        if is_norm:
            fea = self._normalize(fea,dim=1)
        fea = fea[:,np.newaxis,:]
        return fea
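Each statistic is reshaped to (batch, channels, 1), either with keepdims=True or with np.newaxis, so that concatenating along axis=2 stacks the features side by side. A reduced sketch with two invented features and assumed shapes:

import numpy as np

data = np.random.randn(8, 2, 100)  # (batch, channels, samples)
fea_dict = {
    'rms': np.sqrt(np.mean(data**2, axis=2, keepdims=True)),
    'p2p': np.max(data, axis=2, keepdims=True) - np.min(data, axis=2, keepdims=True),
}
fea = np.concatenate(tuple(x for x in fea_dict.values()), axis=2)  # (8, 2, 2)
fea = fea.reshape(-1, fea.shape[1] * fea.shape[2])                 # (8, 4)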
github StructuralNeurobiologyLab / SyConn / syconn / cnn / TrainData.py
    comp_d = np.concatenate([v[0].load()[None,] for v in d])
    small_dist_d = np.concatenate([v[1].load()[None,] for v in d])
    big_dist_d = np.array(comp_d)  # copy
    perm_ixs = np.roll(np.arange(d.shape[0]), 1)
    big_dist_d = big_dist_d[perm_ixs]  # rotate array by 1, i.e. each original
    # view will be compared to a randomly assigned view (likely to be different)
    # change channels
    channels_to_load = list(channels_to_load)
    out_d_1 = comp_d[:, channels_to_load[0]][:, None]
    out_d_2 = small_dist_d[:, channels_to_load[0]][:, None]
    out_d_3 = big_dist_d[:, channels_to_load[0]][:, None]
    for ch in channels_to_load[1:]:
        out_d_1 = np.concatenate([out_d_1, comp_d[:, ch][:, None]], axis=1)
        out_d_2 = np.concatenate([out_d_2, small_dist_d[:, ch][:, None]],
                                 axis=1)
        out_d_3 = np.concatenate([out_d_3, big_dist_d[:, ch][:, None]], axis=1)

    # change number views
    if view_striding != 1:
        assert view_striding in [1, 2, 3]
        out_d_1 = out_d_1[:, :, ::view_striding, :, :]
        out_d_2 = out_d_2[:, :, ::view_striding, :, :]
        out_d_3 = out_d_3[:, :, ::view_striding, :, :]

    # sample views
    view_sampling = np.random.choice(comp_d.shape[2], 3, replace=False)
    out_d_1 = out_d_1[:, :, view_sampling[0]][:, :, None]
    out_d_2 = out_d_2[:, :, view_sampling[1]][:, :, None]
    out_d_3 = out_d_3[:, :, view_sampling[2]][:, :, None]

    out_d = np.concatenate([out_d_1, out_d_2, out_d_3], axis=2)
    return out_d
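The per-channel loop above grows each array by repeated concatenation. An equivalent formulation indexes all channels in one step, since indexing with a list keeps the channel axis; this is a sketch under assumed shapes, not the library's code:

import numpy as np

comp_d = np.random.randn(10, 4, 3, 32, 32)  # (N, channels, views, H, W), invented
channels_to_load = [0, 2]
out_d_1 = comp_d[:, channels_to_load]       # (10, 2, 3, 32, 32), no loop needed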
github DLTK / DLTK / dltk / io / augmentation.py
            # Concatenate them and return the examples
            ex_images = np.concatenate((ex_images, ex_image), axis=0) \
                if (len(ex_images) != 0) else ex_image
            ex_lbls = np.concatenate((ex_lbls, ex_lbl), axis=0) \
                if (len(ex_lbls) != 0) else ex_lbl

        class_ex_images.append(ex_images)
        class_ex_lbls.append(ex_lbls)

        ratio = n_ex_per_class[c_idx] / len(ex_images)
        min_ratio = ratio if ratio < min_ratio else min_ratio

    indices = np.floor(n_ex_per_class * min_ratio).astype(int)

    ex_images = np.concatenate([cimage[:idxs] for cimage, idxs in zip(class_ex_images, indices)
                                if len(cimage) > 0], axis=0)
    ex_lbls = np.concatenate([clbl[:idxs] for clbl, idxs in zip(class_ex_lbls, indices)
                              if len(clbl) > 0], axis=0)

    return ex_images, ex_lbls
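The concatenate-or-assign idiom above sidesteps calling np.concatenate with an empty accumulator whose shape does not yet match the incoming batch. A stripped-down version with invented shapes:

import numpy as np

ex_images = np.array([])
for _ in range(3):
    ex_image = np.random.randn(2, 4, 4)  # a fresh batch of examples
    ex_images = np.concatenate((ex_images, ex_image), axis=0) \
        if len(ex_images) != 0 else ex_image
# ex_images.shape -> (6, 4, 4)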
github ceteke / cae / form_embedding.py
swwae.restore(os.path.join(parsed.out_dir))

X_test, _ = dataset.get_batches(parsed.batch_size, train=False)
test_steps = len(X_test)

print("Forming embedding matrix")

for test_step in range(test_steps):
    X_test_step = X_test[test_step]
    representation = swwae.get_representation(input=X_test_step)

    if test_step == 0:
        embedding_matrix = representation
    else:
        embedding_matrix = np.concatenate((embedding_matrix, representation))

print(embedding_matrix.shape)

tf_path = parsed.save_path + '/embedding'

embedding_tensor = tf.stack(embedding_matrix, name='embedding')
embedding_tensor_variable = tf.Variable(embedding_tensor, trainable=False)
save_sess.run(tf.variables_initializer([embedding_tensor_variable]))
saver = tf.train.Saver(var_list=[embedding_tensor_variable])
saver.save(save_sess, save_path=tf_path)

meta_data = dataset.get_metadata()

with open(os.path.join(parsed.save_path, 'metadata.tsv'), 'w+') as f:
    f.writelines(meta_data)
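Concatenating inside the loop copies the growing embedding_matrix on every step, which is quadratic in the number of batches. A sketch of the usual alternative, reusing the names from the snippet above: collect the per-batch representations in a list and concatenate once.

representations = []
for test_step in range(test_steps):
    representations.append(swwae.get_representation(input=X_test[test_step]))
embedding_matrix = np.concatenate(representations)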