How to use the ipyparallel.error module in ipyparallel

To help you get started, we’ve selected a few ipyparallel.error examples, based on popular ways it is used in public projects.
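ipyparallel.error is a module, not a function: it is where ipyparallel defines its exception classes (RemoteError, CompositeError, TimeoutError, EngineError, NoEnginesRegistered, and so on). As a minimal sketch of catching one of them on the client side, assuming a cluster already started with `ipcluster start -n 2`:

import ipyparallel as ipp
from ipyparallel import error

rc = ipp.Client()
view = rc[:]

try:
    # an exception raised on the engines comes back wrapped as a RemoteError
    # (CompositeError, a RemoteError subclass, when several engines fail)
    view.apply_sync(lambda: 1 / 0)
except error.RemoteError as e:
    print(e)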


github elfi-dev / elfi / tests / functional / test_compilation.py
import ipyparallel


def test_meta_param(ma2):
    sim = ma2.get_reference('MA2')

    # Test that the _uses_meta flag is passed through
    try:
        # Add to state
        sim['_uses_meta'] = True
        sim.generate()
        assert False, "Should raise an error"
    except TypeError:
        assert True
    except ipyparallel.error.RemoteError:
        assert True
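The try/except/assert pattern above predates pytest.raises; a sketch of the equivalent test, assuming pytest and the same ma2 fixture:

import pytest
import ipyparallel


def test_meta_param(ma2):
    sim = ma2.get_reference('MA2')
    sim['_uses_meta'] = True
    # either error is acceptable, matching the two except branches above
    with pytest.raises((TypeError, ipyparallel.error.RemoteError)):
        sim.generate()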
github flatironinstitute / CaImAn / ca_source_extraction / utilities.py
    # excerpt from a cluster-setup helper; assumes os, shlex, subprocess, sys,
    # time, psutil, ipyparallel and `from ipyparallel import Client` imports,
    # plus a shell_source() helper, in the surrounding module
    sys.stdout.flush()
    ncpus = psutil.cpu_count()

    if slurm_script is None:
        if ipcluster == "ipcluster":
            p1 = subprocess.Popen("ipcluster start -n {0}".format(ncpus),
                                  shell=True, close_fds=(os.name != 'nt'))
        else:
            p1 = subprocess.Popen(shlex.split("{0} start -n {1}".format(ipcluster, ncpus)),
                                  shell=True, close_fds=(os.name != 'nt'))

        # poll until all requested engines have registered with the controller
        while True:
            try:
                c = ipyparallel.Client()
                if len(c) < ncpus:
                    sys.stdout.write(".")
                    sys.stdout.flush()
                    raise ipyparallel.error.TimeoutError
                c.close()
                break
            except (IOError, ipyparallel.error.TimeoutError):
                sys.stdout.write(".")
                sys.stdout.flush()
                time.sleep(1)
    else:
        shell_source(slurm_script)
        pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
        c = Client(ipython_dir=pdir, profile=profile)
        ee = c[:]
        ne = len(ee)
        print('Running on %d engines.' % ne)
        c.close()
        sys.stdout.write(" done\n")
github ipython / ipyparallel / ipyparallel / controller / scheduler.py
def handle_stranded_tasks(self, engine):
        """Deal with jobs resident in an engine that died."""
        lost = self.pending[engine]
        for msg_id in list(lost.keys()):
            if msg_id not in lost:
                # prevent double-handling of messages
                continue

            raw_msg = lost[msg_id].raw_msg
            idents, msg = self.session.feed_identities(raw_msg, copy=False)
            parent = self.session.unpack(msg[1].bytes)
            idents = [engine, idents[0]]

            # build fake error reply
            try:
                raise error.EngineError("Engine %r died while running task %r"%(engine, msg_id))
            except:
                content = error.wrap_exception()
            # build fake metadata
            md = dict(
                status=u'error',
                engine=engine.decode('ascii'),
                date=util.utcnow(),
            )
            msg = self.session.msg('apply_reply', content, parent=parent, metadata=md)
            raw_reply = list(map(zmq.Message, self.session.serialize(msg, ident=idents)))
            # and dispatch it
            self.dispatch_result(raw_reply)

        # finally scrub completed/failed lists
        self.completed.pop(engine)
        self.failed.pop(engine)
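The raise-then-wrap idiom in this snippet (it recurs in the hub and client excerpts below) exists because error.wrap_exception() reads the live exception from sys.exc_info(), so the exception has to be raised before it can be packaged. A condensed sketch; the exact dict keys shown are an assumption about the current implementation:

from ipyparallel import error

try:
    raise error.EngineError("Engine b'0' died while running task 'abc'")
except Exception:
    content = error.wrap_exception()

# content is a message-content dict describing the failure
# (status/ename/evalue/traceback fields, per the current implementation)
print(content['status'], content['ename'])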
github ipython / ipyparallel / ipyparallel / controller / scheduler.py
    def fail_unreachable(self, msg_id, why=error.ImpossibleDependency):
        """a task has become unreachable, send a reply with an ImpossibleDependency
        error."""
        if msg_id not in self.queue_map:
            self.log.error("task %r already failed!", msg_id)
            return
        job = self.queue_map.pop(msg_id)
        # lazy-delete from the queue
        job.removed = True
        for mid in job.dependents:
            if mid in self.graph:
                self.graph[mid].remove(msg_id)

        try:
            raise why()
        except:
            content = error.wrap_exception()
github ipython / ipyparallel / ipyparallel / client / client.py
def _build_targets(self, targets):
        """Turn valid target IDs or 'all' into two lists:
        (int_ids, uuids).
        """
        if not self._ids:
            # flush notification socket if no engines yet, just in case
            if not self.ids:
                raise error.NoEnginesRegistered("Can't build targets without any engines")

        if targets is None:
            targets = self._ids
        elif isinstance(targets, string_types):
            if targets.lower() == 'all':
                targets = self._ids
            else:
                raise TypeError("%r not valid str target, must be 'all'"%(targets))
        elif isinstance(targets, int):
            if targets < 0:
                targets = self.ids[targets]
            if targets not in self._ids:
                raise IndexError("No such engine: %i"%targets)
            targets = [targets]

        if isinstance(targets, slice):
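_build_targets is also what runs when you index a Client, so the NoEnginesRegistered it raises can be caught directly in user code. A small sketch:

import ipyparallel as ipp
from ipyparallel import error

rc = ipp.Client()
try:
    view = rc[:]  # indexing the client goes through _build_targets
except error.NoEnginesRegistered:
    print("No engines registered yet; start some with `ipcluster start -n 4`.")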
github ipython / ipyparallel / ipyparallel / controller / hub.py
def _handle_stranded_msgs(self, eid, uuid):
        """Handle messages known to be on an engine when the engine unregisters.

        It is possible that this will fire prematurely - that is, an engine will
        go down after completing a result, and the client will be notified
        that the result failed and later receive the actual result.
        """

        outstanding = self.queues[eid]

        for msg_id in outstanding:
            self.pending.remove(msg_id)
            self.all_completed.add(msg_id)
            try:
                raise error.EngineError("Engine %r died while running task %r" % (eid, msg_id))
            except:
                content = error.wrap_exception()
            # build a fake header:
            header = {}
            header['engine'] = uuid
            header['date'] = util.utcnow()
            rec = dict(result_content=content, result_header=header, result_buffers=[])
            rec['completed'] = util.ensure_timezone(header['date'])
            rec['engine_uuid'] = uuid
            try:
                self.db.update_record(msg_id, rec)
            except Exception:
                self.log.error("DB Error handling stranded msg %r", msg_id, exc_info=True)
github ipython / ipyparallel / ipyparallel / controller / scheduler.py
"""callback for a job's timeout.
        
        The job may or may not have been run at this point.
        """
        if job.timeout_id != timeout_id:
            # not the most recent call
            return
        now = time.time()
        if job.timeout >= (now + 1):
            self.log.warn("task %s timeout fired prematurely: %s > %s",
                job.msg_id, job.timeout, now
            )
        if job.msg_id in self.queue_map:
            # still waiting, but ran out of time
            self.log.info("task %r timed out", job.msg_id)
            self.fail_unreachable(job.msg_id, error.TaskTimeout)
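On the client side, a task the scheduler abandons this way reports TaskTimeout as its error when the result is fetched. A hedged sketch, assuming the load-balanced view's timeout scheduling flag:

import ipyparallel as ipp
from ipyparallel import error

rc = ipp.Client()
lview = rc.load_balanced_view()

# ask the scheduler to give up on tasks it cannot schedule within 5 seconds
with lview.temp_flags(timeout=5):
    ar = lview.apply_async(lambda: "ok")

try:
    ar.get()
except error.RemoteError as e:
    # an abandoned task surfaces TaskTimeout through the usual RemoteError path
    print(e)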
github ipython / ipyparallel / ipyparallel / client / client.py
    # method name, signature, and docstring opening reconstructed: the body
    # below uses `eid` and `uuid`, mirroring the hub's _handle_stranded_msgs above
    def _handle_stranded_msgs(self, eid, uuid):
        """Handle messages known to be on an engine when the engine unregisters.

        It is possible that this will fire prematurely - that is, an engine will
        go down after completing a result, and the client will be notified
        of the unregistration and later receive the successful result.
        """

        outstanding = self._outstanding_dict[uuid]

        for msg_id in list(outstanding):
            if msg_id in self.results:
# we already have the result
                continue
            try:
                raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
            except:
                content = error.wrap_exception()
            # build a fake message:
            msg = self.session.msg('apply_reply', content=content)
            msg['parent_header']['msg_id'] = msg_id
            msg['metadata']['engine'] = uuid
            self._handle_apply_reply(msg)