How to use the mpi4py.MPI.COMM_WORLD.Get_rank function in mpi4py

To help you get started, we’ve selected a few MPI.COMM_WORLD.Get_rank examples based on popular ways mpi4py is used in public projects.

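Before the project examples, here is a minimal, self-contained sketch of Get_rank: each process in MPI.COMM_WORLD reports its own rank and the communicator size. Run it with, e.g., mpiexec -n 4 python hello_rank.py (the file name is just an example).

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()   # this process's index, 0 .. size-1
size = comm.Get_size()   # total number of processes
print("Hello from rank {} of {}".format(rank, size))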

github Gjacquenot / Puma-EM / code / compute_SAI_precond_MLFMA.py
def compute_SAIpreconditioner(tmpDirName, C, chunkNumber_to_cubesNumbers, cubeNumber_to_chunkNumber, chunkNumber_to_processNumber, processNumber_to_ChunksNumbers, MAX_BLOCK_SIZE):
    my_id = MPI.COMM_WORLD.Get_rank()
    # computation of near interactions matrix
    ELEM_TYPE = 'F'
    Z_TMP_ELEM_TYPE = 'F'
    # computation of the Frobenius preconditioner
    Wall_t0 = time.time()
    CPU_t0 = time.clock()  # note: time.clock() was removed in Python 3.8; time.process_time() is the modern replacement
    pathToReadFrom = os.path.join(tmpDirName, 'Z_tmp')
    pathToSaveTo = os.path.join(tmpDirName, 'Mg_LeftFrob')
    # we look for the LIB_G2C type
    with open('makefile.inc', 'r') as f:
        content = f.readlines()
    for elem in content:
        if 'G2C' in elem:
            LIB_G2C = elem.split('=')[1].split()[0]
    if (my_id == 0):
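The excerpt gates the follow-up work on my_id == 0. Here is a minimal sketch of that pattern, with the makefile parsing mirrored from the excerpt and the parsed value shared via bcast so every rank ends up holding it (the variable names are illustrative):

from mpi4py import MPI

comm = MPI.COMM_WORLD
my_id = comm.Get_rank()

if my_id == 0:
    # only rank 0 reads the file; other ranks would just race on the same path
    with open('makefile.inc', 'r') as f:
        content = f.readlines()
    lib_g2c = None
    for elem in content:
        if 'G2C' in elem:
            lib_g2c = elem.split('=')[1].split()[0]
else:
    lib_g2c = None

lib_g2c = comm.bcast(lib_g2c, root=0)   # every rank now has the parsed value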
github StanfordVL / GibsonEnvV2 / examples / train / enjoy_husky_navigate_ppo1.py
def train(num_timesteps, seed):
    rank = MPI.COMM_WORLD.Get_rank()
    #sess = U.single_threaded_session()
    sess = tf_utils.make_gpu_session(args.num_gpu)
    sess.__enter__()
    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess,tf.train.latest_checkpoint('./'))

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)

    use_filler = not args.disable_filler
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'configs',
                               'husky_navigate_enjoy.yaml')
    print(config_file)

    env = HuskyNavigateEnv(gpu_idx=args.gpu_idx, config = config_file)

    def policy_fn(name, ob_space, ac_space):
        if args.mode == "SENSOR":
            return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space, hid_size=64, num_hid_layers=2)
        else:
            return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space, save_per_acts=10000, session=sess, kind='small')
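The line worth copying from this example is the seeding: offsetting a base seed by 10000 * rank gives every MPI worker a distinct but reproducible random stream. A minimal sketch, with numpy standing in for whatever RNGs the training code actually uses:

import numpy as np
from mpi4py import MPI

base_seed = 42
workerseed = base_seed + 10000 * MPI.COMM_WORLD.Get_rank()
np.random.seed(workerseed)   # rank 0 -> 42, rank 1 -> 10042, ...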
github r7vme / learning-to-drive-in-a-day / ddpg_with_vae.py
def learn(self, total_timesteps, callback=None, vae=None, skip_episodes=5):
        rank = MPI.COMM_WORLD.Get_rank()
        # we assume symmetric actions.
        assert np.all(np.abs(self.env.action_space.low) == self.env.action_space.high)

        self.episode_reward = np.zeros((1,))
        with self.sess.as_default(), self.graph.as_default():
            # Prepare everything.
            self._reset()
            episode_reward = 0.
            episode_step = 0
            episodes = 0
            step = 0
            total_steps = 0

            start_time = time.time()

            actor_losses = []
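Here rank is captured once at the top of learn(); the excerpt is cut off before the variable is used, but its usual role in DDPG-style trainers, as in the other examples on this page, is to restrict logging and checkpoint writes to rank 0.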
github Stable-Baselines-Team / stable-baselines / baselines / trpo_mpi / trpo_mpi.py
def setup_model(self):
        # prevent import loops
        from baselines.gail.adversary import TransitionClassifier

        with SetVerbosity(self.verbose):

            self.nworkers = MPI.COMM_WORLD.Get_size()
            self.rank = MPI.COMM_WORLD.Get_rank()
            np.set_printoptions(precision=3)

            self.graph = tf.Graph()
            with self.graph.as_default():
                self.sess = tf_util.single_threaded_session(graph=self.graph)

                if self.using_gail:
                    self.reward_giver = TransitionClassifier(self.env, self.hidden_size_adversary,
                                                             entcoeff=self.adversary_entcoeff)

                # Construct network for new policy
                with tf.variable_scope("pi", reuse=False):
                    self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                                 None, reuse=False)

                # Network for old policy
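Get_size() and Get_rank() almost always travel together, as they do here: the size tells each process how many workers exist, and the rank tells it which one it is, so shared work can be split without any communication. A minimal sketch of that split (the round-robin scheme is illustrative, not taken from the TRPO code):

from mpi4py import MPI

comm = MPI.COMM_WORLD
nworkers = comm.Get_size()
rank = comm.Get_rank()

n_tasks = 100
my_tasks = range(rank, n_tasks, nworkers)   # round-robin split of tasks 0..99
print("rank {} handles {} tasks".format(rank, len(my_tasks)))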
github uw-cmg / MAST / MAST / structopt / tools / eval_energy.py
energy = energy of Individual evaluated
        bul = bulk structure of Individual if simulation structure is Defect
        individ = Individual class structure evaluated
        signal = string of information about evaluation
    """
    #logger = initialize_logger(Optimizer.loggername)
    logger = logging.getLogger(Optimizer.loggername)
    if 'MAST' in Optimizer.calc_method:
        energy = individ.energy
        bul = individ.bulki
        signal = 'Received MAST structure\n'
        logger.info('Received individual index = {0} from MAST with energy {1}. Returning with no evaluation'.format(
            individ.index, individ.energy))
    else:
        if Optimizer.parallel: 
            rank = MPI.COMM_WORLD.Get_rank()
        logger.info('Received individual HI = {0} with energy {1} for energy evaluation'.format(
            individ.history_index, individ.energy))
        STR='----Individual ' + str(individ.history_index)+ ' Optimization----\n'
        indiv=individ[0]
        if 'EE' in Optimizer.debug:
            debug = True
        else:
            debug = False
        if debug: 
            write_xyz(Optimizer.debugfile,indiv,'Received by eval_energy')
            Optimizer.debugfile.flush()
            logger.debug('Writing received individual to debug file')
        # Establish individual structure for evaluation.  Piece together regions when necessary.
        if Optimizer.structure=='Defect':
            indi=indiv.copy()
            bulk=individ.bulki
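Note the guard: Get_rank() is only called when Optimizer.parallel is set, so the same evaluation code also runs in a serial job. A minimal sketch of that toggle, with a plain parallel flag standing in for Optimizer.parallel:

def get_rank(parallel):
    if parallel:
        from mpi4py import MPI           # deferred so serial runs never touch MPI
        return MPI.COMM_WORLD.Get_rank()
    return 0                             # serial runs behave like rank 0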
github AcutronicRobotics / ros2learn / experiments / examples / modular_scara_4dof_v3 / run_mlsh.py
def callback(it):
    if MPI.COMM_WORLD.Get_rank()==0:
        if it % 5 == 0 and it > 3: # and not replay:
            fname = osp.join("savedir/", 'checkpoints', '%.5i'%it)
            U.save_state(fname)
    if it == 0:
        print("CALLBACK")
        fname = '/home/rkojcev/baselines_networks/mlsh/saved_models/00040'
        subvars = []
        for i in range(num_subs-1):
            subvars += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="sub_policy_%i" % (i+1))
        print([v.name for v in subvars])
        U.load_state(fname, subvars)
        time.sleep(5)
        pass
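The Get_rank() == 0 check at the top is the standard way to keep checkpointing single-writer: every worker reaches the callback, but only rank 0 touches the save directory, so workers never clobber each other's files.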
github denncli / multi-step-curiosity-driven-learning / recorder.py
def get_filename(self, i):
        filename = os.path.join(logger.get_dir(), 'env{}_{}.pk'.format(MPI.COMM_WORLD.Get_rank(), i))
        return filename
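Embedding the rank in the path is the simplest way to let every process write its own file without coordination. A minimal standalone version of the same idea (log_dir replaces the logger.get_dir() call):

import os
from mpi4py import MPI

def get_filename(log_dir, i):
    # one file per (rank, index) pair, e.g. env0_3.pk on rank 0
    return os.path.join(log_dir, 'env{}_{}.pk'.format(MPI.COMM_WORLD.Get_rank(), i))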
github PCCproject / PCC-Uspace / python / models / gym-expr / baselines_master / logger.py
def make_output_format(format, ev_dir):
    os.makedirs(ev_dir, exist_ok=True)
    rank = MPI.COMM_WORLD.Get_rank()
    if format == 'stdout':
        return HumanOutputFormat(sys.stdout)
    elif format == 'log':
        suffix = "" if rank==0 else ("-mpi%03i"%rank)
        return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % suffix))
    elif format == 'json':
        assert rank==0
        return JSONOutputFormat(osp.join(ev_dir, 'progress.json'))
    elif format == 'csv':
        assert rank==0
        return CSVOutputFormat(osp.join(ev_dir, 'progress.csv'))
    elif format == 'tensorboard':
        assert rank==0
        return TensorBoardOutputFormat(osp.join(ev_dir, 'tb'))
    else:
        raise ValueError('Unknown format specified: %s' % (format,))
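This logger combines two rank-based conventions: the machine-readable formats (json, csv, tensorboard) are asserted to exist only on rank 0, while human-readable log files get a -mpiNNN suffix on nonzero ranks so each worker's output stays separate.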
github uchicago-cs / deepdish / deepdish / parallel / mpi.py
def rank():
    from mpi4py import MPI
    rank = MPI.COMM_WORLD.Get_rank()
    return rank
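Importing mpi4py inside the function rather than at module level is deliberate: from mpi4py import MPI initializes MPI by default, so deferring the import means code that merely imports this helper, but never calls it, does not start an MPI runtime.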
github uw-cmg / MAST / MAST / structopt / Optimizer.py
def algorithm_par_mp(self):
        """Subprogram for running parallel version of GA
        Requires MPI4PY"""
        global logger
        comm = MPI.COMM_WORLD
        rank = MPI.COMM_WORLD.Get_rank()
        if rank==0:
            if 'MA' in self.debug:
                debug = True
            else:
                debug = False
            self.algorithm_initialize()
        self.convergence = False
        convergence = False
        while not convergence:
            if rank==0:
                pop = self.population
                offspring = self.generation_set(self,pop)
                # Identify the individuals with an invalid fitness
                invalid_ind = [ind for ind in offspring if ind.energy==0]
                #Evaluate the individuals with invalid fitness
                self.output.write('\n--Evaluate Structures--\n')
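The excerpt stops mid-loop, but the shape of the algorithm is the classic master-worker split: rank 0 builds the generation, all ranks evaluate a share, and rank 0 collects the results. A minimal sketch of one such round with placeholder individuals and a placeholder fitness function (none of these names come from MAST):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

if rank == 0:
    population = list(range(size * 4))                  # placeholder individuals
    chunks = [population[i::size] for i in range(size)]
else:
    chunks = None

local = comm.scatter(chunks, root=0)                    # each rank gets one chunk
local_results = [x * x for x in local]                  # placeholder fitness evaluation
results = comm.gather(local_results, root=0)            # rank 0 collects everything

if rank == 0:
    evaluated = [e for part in results for e in part]
    print("evaluated {} individuals".format(len(evaluated)))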