How to use the mpi4py.MPI.COMM_WORLD.Barrier function in mpi4py

To help you get started, we’ve selected a few mpi4py examples, based on popular ways MPI.COMM_WORLD.Barrier is used in public projects.

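Barrier() is a collective call: every process in the communicator blocks until all of them have reached it, which makes it a simple way to order work across ranks. Here is a minimal, self-contained sketch of the call itself (not taken from any of the projects below); run it with e.g. mpiexec -n 4 python barrier_demo.py:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

print(f"rank {rank}: before the barrier")
comm.Barrier()  # blocks until every rank in COMM_WORLD reaches this call
print(f"rank {rank}: after the barrier")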

github AcutronicRobotics / ros2learn / experiments / examples / MARA / train_mlsh.py
def main(env, savename, save_dir, replay, macro_duration, num_subs, num_rollouts, warmup_time, train_time, force_subpolicy, store):
    if MPI.COMM_WORLD.Get_rank() == 0 and osp.exists(LOGDIR):
        shutil.rmtree(LOGDIR)
    MPI.COMM_WORLD.Barrier()
    # with logger.session(dir=LOGDIR):
    #train(env, savename, replay, macro_duration, num_subs,  num_rollouts, warmup_time, train_time, force_subpolicy, store)
    train(env, savename, save_dir, replay, macro_duration, num_subs,  num_rollouts, warmup_time, train_time, force_subpolicy, store)
if __name__ == '__main__':
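The pattern above, which also appears in several of the examples that follow, lets rank 0 delete a stale log directory while the Barrier keeps the other ranks from racing ahead into a directory that is about to disappear. A stripped-down sketch of that idea, with LOGDIR as a placeholder path rather than the project's actual value:

import os
import shutil
from mpi4py import MPI

comm = MPI.COMM_WORLD
LOGDIR = "/tmp/experiment_logs"  # placeholder; each project defines its own LOGDIR

# Only rank 0 touches the filesystem; every other rank waits at the Barrier.
if comm.Get_rank() == 0 and os.path.exists(LOGDIR):
    shutil.rmtree(LOGDIR)
comm.Barrier()  # no rank proceeds until rank 0 has finished the cleanup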
github CDMCH / ddpg-curiosity-and-multi-criteria-her / ddpg_curiosity_mc_her / ddpg / training.py
                for role, agent in agents.items():
                    param_noise_distances[role] = agent.adapt_param_noise()

            for train_step in range(n_batches):
                critic_losses = {}
                actor_losses = {}
                for role, agent in agents.items():
                    critic_losses[role], actor_losses[role] = agent.train()
                for agent in agents.values():
                    agent.update_target_net()

                batch += 1

        if heatmaps:
            train_rollout_worker.flush_env_location_records()
            MPI.COMM_WORLD.Barrier()
            logger.info("Creating heatmap...")
            if rank == 0:
                heatmap_save_path = generate_3d_fetch_stack_heatmap_from_npy_records(
                    working_dir=os.path.join(logger.get_dir(), 'heatmaps'),
                    file_prefix='epoch{}'.format(epoch),
                    delete_records=True
                )
                logger.info("Heatmap saved to {}".format(heatmap_save_path))

        # test
        if do_evaluation:
            eval_rollout_worker.clear_history()
            for _ in range(n_test_rollouts):
                eval_rollout_worker.generate_rollouts()

            current_score = mpi_average(eval_rollout_worker.current_score())
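In the heatmap branch above, every rank first flushes its own location records to disk, the Barrier guarantees that those files are complete, and only then does rank 0 aggregate them. A hedged sketch of that write-then-aggregate pattern (the file layout and the aggregation step here are illustrative, not the project's actual code):

import glob
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Each rank writes its own partial results (illustrative file names).
np.save(f"records_rank{rank}.npy", np.random.rand(10, 3))

comm.Barrier()  # make sure every rank's file has been written before reading

if rank == 0:
    parts = [np.load(path) for path in sorted(glob.glob("records_rank*.npy"))]
    print("combined records:", np.concatenate(parts).shape)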
github mfem / PyMFEM / mfem / common / parcsr_extra.py
       col_starts[2] = n
    else:
       # make sure that dtype is right....
       col_starts = np.array(col_starts, dtype = dtype)
    if check_partitioning:
        ch = get_assumed_patitioning(m)
        if (row_starts[0] != ch[0] or 
            row_starts[1] != ch[1] or 
            nrows != ch[2]):
            for k in range(num_proc):
                MPI.COMM_WORLD.Barrier()
                if myid == k:
                    print('MyID : ', k)
                    print(ch, nrows, row_starts, col_starts)
                    print('NNZ', np.sum(data != 0.0))
            MPI.COMM_WORLD.Barrier()
            raise ValueError("partitioning of input matrix is not correct")
    if verbose: verbose_message(m, n, nrows, i, j, data, row_starts, col_starts)

    
    #
    # it seems row_starts and col_starts are both used to determine
    # which part is treated as the diagonal element.
    #
    if (m == n and row_starts[0] == col_starts[0] and
        row_starts[1] == col_starts[1]):
        # this will cause hypre_CSRMatrixReorder call.
        M = mfem.HypreParMatrix(MPI.COMM_WORLD,
                                nrows,
                                m, n, [i, j,
                                data, col_starts])
        M.CopyRowStarts()
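The per-rank loop in the PyMFEM snippet is a common idiom for keeping diagnostic output roughly in rank order: a Barrier at each iteration lets process k take its turn while the others wait. A minimal version of just that idiom:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Take turns: on iteration k only rank k prints, everyone else just
# passes through the Barrier, so the reports come out one rank at a time.
for k in range(comm.Get_size()):
    comm.Barrier()
    if rank == k:
        print(f"rank {rank}: my turn to report")
comm.Barrier()  # final sync before continuing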
github AcutronicRobotics / ros2learn / experiments / examples / modular_scara_4dof_v3 / train_mlsh.py
def main(env, savename, replay, macro_duration, num_subs, num_rollouts, warmup_time, train_time, force_subpolicy, store):
    if MPI.COMM_WORLD.Get_rank() == 0 and osp.exists(LOGDIR):
        shutil.rmtree(LOGDIR)
    MPI.COMM_WORLD.Barrier()
    # with logger.session(dir=LOGDIR):
    train(env, savename, replay, macro_duration, num_subs,  num_rollouts, warmup_time, train_time, force_subpolicy, store)
github AcutronicRobotics / ros2learn / experiments / examples / modular_scara_3dof_v3 / train_mlsh.py
def main(env, savename, save_dir, replay, macro_duration, num_subs, num_rollouts, warmup_time, train_time, force_subpolicy, store):
    if MPI.COMM_WORLD.Get_rank() == 0 and osp.exists(LOGDIR):
        shutil.rmtree(LOGDIR)
    MPI.COMM_WORLD.Barrier()
    # with logger.session(dir=LOGDIR):
    #train(env, savename, replay, macro_duration, num_subs,  num_rollouts, warmup_time, train_time, force_subpolicy, store)
    train(env, savename, save_dir, replay, macro_duration, num_subs,  num_rollouts, warmup_time, train_time, force_subpolicy, store)
if __name__ == '__main__':
github jaredwo / topowx / scripts / step24_mpi_xval_interp.py
    else:
        raise ValueError("Unrecognized element: " + elem)
        
    rank = MPI.COMM_WORLD.Get_rank()
    nsize = MPI.COMM_WORLD.Get_size()
    
    print "Process %d of %d: element is %s" % (rank, nsize, elem)
            
    if rank == RANK_COORD:        
        proc_coord(fpath_stndb, elem, nsize - N_NON_WRKRS)
    elif rank == RANK_WRITE:
        proc_write(fpath_stndb, elem, fpath_out, nsize - N_NON_WRKRS)
    else:
        proc_work(fpath_stndb, elem, rank)

    MPI.COMM_WORLD.Barrier()
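The topoWx script dispatches each rank to a coordinator, writer, or worker role and then hits a final Barrier so that no process exits before the others have finished. A bare-bones sketch of that shape (the role bodies are placeholders):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    pass  # coordinator: hand out work items
elif rank == 1:
    pass  # writer: collect and persist results
else:
    pass  # worker: process assigned items

comm.Barrier()  # keep every role alive until all of them are done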
github jaredwo / topowx / twx / infill / mpi_infill_optim_tair.py
#        ds = Dataset('/projects/daymet2/station_data/infill/xval_impute_norm.nc')
#        params[P_INCLUDE_STNIDS] = np.array(ds.variables['stn_id'][:],dtype="
github AcutronicRobotics / ros2learn / experiments / examples / dual_robot / run_mlsh.py
    # parser = argparse.ArgumentParser()
    # parser.add_argument('--optimize', type=bool)
    # args = parser.parse_args()
    #
    # env = 'GazeboModularScara4DOF-v3'

    # if 'optimize' == True:
    #     main(job_id, env, savename, replay, params['macro_duration'], params['num_subs'], params['num_rollouts'], params['warmup_time'],  params['train_time'], force_subpolicy, store)
    # else:
    #     #Parameters set by user
    #     job_id = None


    if MPI.COMM_WORLD.Get_rank() == 0 and osp.exists(LOGDIR):
        shutil.rmtree(LOGDIR)
    MPI.COMM_WORLD.Barrier()
    # with logger.session(dir=LOGDIR):
    load()
github deephyper / deephyper / deephyper / search / nas / nas_a3c_async_emb.py
def __init__(self, problem, run, evaluator, **kwargs):
        self.rank = MPI.COMM_WORLD.Get_rank()
        if self.rank == 0:
            super().__init__(problem, run, evaluator, cache_key=key, **kwargs)
        MPI.COMM_WORLD.Barrier()
        if self.rank != 0:
            super().__init__(problem, run, evaluator, cache_key=key, **kwargs)
        # set in super : self.problem
        # set in super : self.run_func
        # set in super : self.evaluator

        self.num_episodes = kwargs.get('num_episodes')
        if self.num_episodes is None:
            self.num_episodes = math.inf

        self.reward_rule = util.load_attr_from('deephyper.search.nas.agent.utils.'+kwargs['reward_rule'])

        self.space = self.problem.space

        logger.debug(f'evaluator: {type(self.evaluator)}')
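The DeepHyper constructor above is a staged-initialization pattern: rank 0 runs the base-class __init__ first so that any shared state it creates exists, the Barrier fences that step, and only then do the remaining ranks run theirs. A minimal sketch of the same ordering, with create_shared_state as a hypothetical setup step:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

def create_shared_state():
    # hypothetical one-time setup (e.g. creating a cache directory or
    # database file that the other ranks will attach to)
    print("rank 0: shared state created")

if rank == 0:
    create_shared_state()
comm.Barrier()  # ranks other than 0 wait until rank 0 has finished setting up
if rank != 0:
    print(f"rank {rank}: attaching to shared state")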
github jaredwo / topowx / twx / infill / mpi_infill_optim_prcp_norm.py
    if rank == RANK_COORD:
        
        params[P_EXCLUDE_STNIDS] = np.array([])
        ds = Dataset('/projects/daymet2/station_data/infill/xval_infill_po.nc')
        params[P_INCLUDE_STNIDS] = np.array(ds.variables['stn_id'][:],dtype="