How to use the mpi4py.MPI.Get_processor_name function in mpi4py

To help you get started, we’ve selected a few mpi4py examples based on popular ways MPI.Get_processor_name is used in public projects.

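At its core, MPI.Get_processor_name() is a module-level mpi4py function that returns the name of the host (node) the calling process is running on; it wraps the standard MPI_Get_processor_name call. Before the project examples, here is a minimal, self-contained sketch of typical usage; the file name and the mpiexec launch line are illustrative assumptions, not taken from any of the projects below.

# hello_name.py -- hypothetical file name; launch with e.g.: mpiexec -n 4 python hello_name.py
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()            # rank of this process within COMM_WORLD
size = comm.Get_size()            # total number of processes
host = MPI.Get_processor_name()   # name of the node this process runs on

print("Process %d of %d is running on %s" % (rank, size, host))

Each process reports its own host name, which is how the projects below tag log messages and group ranks by node.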

github espressopp / espressopp / contrib / mpi4py / mpi4py-2.0.0 / demo / sequential / test_seq.py
from mpi4py import MPI
from seq import Seq  # Seq (the sequential-execution context manager) comes from the demo's accompanying seq module

def test():
    size = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    name = MPI.Get_processor_name()
    with Seq(MPI.COMM_WORLD, 1, 10):
        print(
            "Hello, World! I am process %d of %d on %s."
            % (rank, size, name))
github bccp / nbodykit / nbodykit / utils / parallel.py
    def wait(self):
        """
        If this isn't the master process, wait for instructions.
        """
        if self.is_master():
            raise RuntimeError("master node told to await jobs")

        # logging info
        if self.subcomm.rank == 0:
            args = (self.rank, MPI.Get_processor_name(), self.subcomm.size)
            self.logger.info("worker master rank is %d on %s with %d processes available" % args)

        # continuously loop and wait for instructions
        while True:
            args = None
            tag = -1

            # have the master rank of the subcomm ask for task and then broadcast
            if self.subcomm.rank == 0:
                self.comm.send(None, dest=0, tag=self.tags.READY)
                args = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=self.status)
                tag = self.status.Get_tag()
            
            # bcast to everyone in the worker subcomm
            args  = self.subcomm.bcast(args)
            tag   = self.subcomm.bcast(tag)
github bccp / nbodykit / bin / fof.py
"""
        )

h = "Data source to read particle position:\n\n"
parser.add_argument("datasource", type=DataSource.fromstring,
        help=h + DataSource.format_help())
parser.add_argument("LinkingLength", type=float, 
        help='LinkingLength in mean separation (0.2)')
parser.add_argument("output", help='output file; output.grp.N and output.halo are written')
parser.add_argument("--nmin", type=float, default=32, help='minimum number of particles in a halo')

ns = parser.parse_args()
from mpi4py import MPI

rank = MPI.COMM_WORLD.rank
name = MPI.Get_processor_name().split('.')[0]
logging.basicConfig(level=logging.DEBUG,
                    format='rank %d on %s: '%(rank,name) + \
                            '%(asctime)s %(name)-15s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M:%S')


from nbodykit.fof import fof

def main():
    comm = MPI.COMM_WORLD

    catalogue, labels = fof(ns.datasource, ns.LinkingLength, ns.nmin, comm, return_labels=True)
    Ntot = comm.allreduce(len(labels))

    if comm.rank == 0:
        with h5py.File(ns.output + '.hdf5', 'w') as ff:
github bccp / nbodykit / bin / nbkit.py
def setup_logging(log_level):
    """
    Set the basic configuration of all loggers
    """

    # This gives:
    #
    # [ 000000.43 ]   0:waterfall 06-28 14:49  measurestats    INFO     Nproc = [2, 1, 1]
    # [ 000000.43 ]   0:waterfall 06-28 14:49  measurestats    INFO     Rmax = 120

    import time
    logger = logging.getLogger()
    t0 = time.time()

    rank = MPI.COMM_WORLD.rank
    name = MPI.Get_processor_name().split('.')[0]

    class Formatter(logging.Formatter):
        def format(self, record):
            s1 = ('[ %09.2f ] % 3d:%s ' % (time.time() - t0, rank, name))
            return s1 + logging.Formatter.format(self, record)

    fmt = Formatter(fmt='%(asctime)s %(name)-15s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M ')

    hdlr = logging.StreamHandler()
    hdlr.setFormatter(fmt)
    logger.addHandler(hdlr)
    logger.setLevel(log_level)
github bccp / nbodykit / bin / nbkit-batch.py
def setup_logging(log_level):
    """
    Set the basic configuration of all loggers
    """

    # This gives:
    #
    # [ 000000.43 ]   0:waterfall 06-28 14:49  measurestats    INFO     Nproc = [2, 1, 1]
    # [ 000000.43 ]   0:waterfall 06-28 14:49  measurestats    INFO     Rmax = 120

    import time
    logger = logging.getLogger()
    t0 = time.time()

    rank = MPI.COMM_WORLD.rank
    name = MPI.Get_processor_name().split('.')[0]

    class Formatter(logging.Formatter):
        def format(self, record):
            s1 = ('[ %09.2f ] % 3d:%s ' % (time.time() - t0, rank, name))
            return s1 + logging.Formatter.format(self, record)

    fmt = Formatter(fmt='%(asctime)s %(name)-15s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M ')

    hdlr = logging.StreamHandler()
    hdlr.setFormatter(fmt)
    logger.addHandler(hdlr)
    logger.setLevel(log_level)
github blekhmanlab / hominid / hominid / hominid.py
                        print(
                            "[controller {}] unrecognized message from source {} with tag {}:\n{}".format(
                                datetime.datetime.now().isoformat(), source, tag, msg
                            )
                        )
                print(
                    "[controller {}] all workers have exited".format(
                        datetime.datetime.now().isoformat()
                    )
                )
        else:
            # this process is a worker
            self.initialize_worker()
            worker_t0 = time.time()
            task_count = 0
            name = MPI.Get_processor_name()
            print(
                "[worker {} {}] running on {}".format(
                    rank, datetime.datetime.now().isoformat(), name
                )
            )
            while True:
                print(
                    "[worker {} {}] sending request for work".format(
                        rank, datetime.datetime.now().isoformat()
                    )
                )
                comm.send(None, dest=0, tag=READY_)
                print(
                    "[worker {} {}] waiting for work".format(
                        rank, datetime.datetime.now().isoformat()
                    )
github westpa / westpa / lib / wwmgr / work_managers / mpi.py
    def __init__(self):
        # Initialize communicator and obtain standard MPI variables
        comm = MPI.COMM_WORLD

        self.comm = comm
        self.rank = comm.Get_rank()
        self.num_procs = comm.Get_size()
        self.name = MPI.Get_processor_name()

        # Define master rank
        self.master_rank = 0

        # Define message tags for task, result, and announce
        self.task_tag = 10
        self.result_tag = 20
        self.announce_tag = 30

        # create an empty message buffer
        messages = []
github gprMax / gprMax / gprMax / gprMax.py
                Python code blocks in input file.
        optparams (dict): Optional argument. For Taguchi optimisation it
                provides the parameters to optimise and their values.
    """

    from mpi4py import MPI

    # Define MPI message tags
    tags = Enum('tags', {'READY': 0, 'DONE': 1, 'EXIT': 2, 'START': 3})

    # Initializations and preliminaries
    comm = MPI.COMM_WORLD
    size = comm.Get_size()  # total number of processes
    rank = comm.Get_rank()  # rank of this process
    status = MPI.Status()   # get MPI status object
    hostname = MPI.Get_processor_name()     # get name of processor/host

    # Set range for number of models to run
    modelstart = args.restart if args.restart else 1
    modelend = modelstart + args.n
    numbermodelruns = args.n
    currentmodelrun = modelstart  # can use -task argument to start numbering from something other than 1
    numworkers = size - 1

    ##################
    # Master process #
    ##################
    if rank == 0:
        tsimstart = timer()
        mpistartstr = '\n=== MPI task farm (WITHOUT using MPI Spawn)'
        print('{} {}'.format(mpistartstr, '=' * (get_terminal_width() - 1 - len(mpistartstr))))
        print('=== MPI master ({}, rank: {}) on {} using {} workers...'.format(comm.name, comm.Get_rank(), hostname, numworkers))
github twiecki / mpi4py_map / mpi4py_map / mpi4py_map.py
            Sequence of elements supplied to the workers.

    :Optional:
        args : tuple
            Additional constant arguments supplied to function.
        debug : bool=False
            Be very verbose (for debugging purposes).

    """
    debug = 'debug' in kwargs
    if debug:
        del kwargs['debug']

    rank = MPI.COMM_WORLD.Get_rank()
    assert rank == 0, "rank has to be 0."
    proc_name = MPI.Get_processor_name()
    status = MPI.Status()

    process_list = range(1, MPI.COMM_WORLD.Get_size())
    workers_done = []
    results = {}
    if debug: print("Data:", sequence)

    # Instead of distributing the actual elements, we just distribute
    # the index as the workers already have the sequence. This allows
    # objects to be used as well.
    queue = iter(range(len(sequence)))

    if debug: print("Controller %i on %s: ready!" % (rank, proc_name))

    # Feed all queued jobs to the children
    while True:
github chaitan3 / adFVM / adFVM / parallel.py
def getLocalRank(mpi, name, rank):
    # def line reconstructed from the call below; the code that builds nameRanks is omitted here
    names = {}
    if rank == 0:
        for n, r in nameRanks:
            if n not in names:
                names[n] = []
            names[n].append(r)
        for n in names:
            names[n].sort()
    names = mpi.bcast(names, root=0)
    return names[name].index(rank), len(names[name])

try:
    from mpi4py import MPI
    mpi = MPI.COMM_WORLD
    nProcessors = mpi.Get_size()
    name = MPI.Get_processor_name()
    rank = mpi.Get_rank()
    localRank, nRanksPerNode = getLocalRank(mpi, name, rank)
    #mpsRank = localRank % 2
    #os.environ['CUDA_MPS_PIPE_DIRECTORY'] = '/tmp/nvidia-pipe-{}'.format(mpsRank)
    #os.environ['CUDA_MPS_LOG_DIRECTORY'] = '/tmp/nvidia-log-{}'.format(mpsRank)
except:
    print('mpi4py NOT LOADED: YOU SHOULD BE RUNNING ON A SINGLE CORE')
    class Container(object):
        pass
    mpi = Container()
    nProcessors = 1
    name = ''
    rank = 0
    localRank, nRanksPerNode = 0, 1
    mpi.bcast = lambda x, root: x
    mpi.Bcast = lambda x, root: None
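
The adFVM snippet above computes a node-local rank by grouping global ranks according to their processor name. A rough, self-contained sketch of the same idea follows; the helper name local_rank is an assumption, and allgather is used here in place of the original gather/bcast pair.

from mpi4py import MPI

def local_rank(comm=MPI.COMM_WORLD):
    """Return (rank within this node, number of ranks on this node)."""
    name = MPI.Get_processor_name()   # host name identifying the node
    rank = comm.Get_rank()
    # every rank contributes its (host name, global rank) pair
    name_ranks = comm.allgather((name, rank))
    # ranks that report the same processor name share a node
    same_node = sorted(r for n, r in name_ranks if n == name)
    return same_node.index(rank), len(same_node)

Because allgather lets every rank build the grouping locally, no explicit broadcast of the result is needed; the outcome matches the original approach as long as processor names uniquely identify nodes.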