How to use the eventlet.timeout.Timeout class in eventlet

To help you get started, we’ve selected a few examples of eventlet.timeout.Timeout, based on popular ways it is used in public projects.

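Before the project snippets, here is a minimal, self-contained sketch of the canonical pattern: wrap the blocking call in a with-block and handle the Timeout that is raised when the limit expires. The do_work() function and the three-second limit are made up for illustration; the examples below show the variants (a custom exception, silent expiry with False, and a manually cancelled timer).

import eventlet
from eventlet.timeout import Timeout

def do_work():
    eventlet.sleep(10)       # stand-in for a blocking, cooperative call

try:
    # If the block runs longer than 3 seconds, the Timeout instance itself
    # is raised in the current greenthread.
    with Timeout(3):
        do_work()
except Timeout:
    print("do_work() took longer than 3 seconds")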

github nameko / nameko / test / test_messaging.py View on Github
    ctx_data = {
        'language': 'en',
        'customheader': 'customvalue',
    }
    with wait_for_call(CONSUME_TIMEOUT, container.spawn_worker) as method:
        method.assert_called_once_with(consumer, ('msg',), {},
                                       context_data=ctx_data,
                                       handle_result=ANY_PARTIAL)
        handle_result = method.call_args[1]['handle_result']

    # ack message
    handle_result(worker_ctx, 'result')

    # stop will hang if the consumer hasn't acked or requeued messages
    with eventlet.timeout.Timeout(CONSUME_TIMEOUT):
        consumer.stop()

    consumer.queue_consumer.kill()
github openstack / cinder / cinder / volume / drivers / vmware / vmware_images.py View on Github
    # For a Glance-to-VMware transfer, we just need a handle to the
    # HTTP connection that will send the transfer data to the VMware datastore.
    if write_file_handle:
        write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
    # For a VMware-to-Glance transfer, we relinquish the VMware HTTP file read
    # handle to the Glance client instance, but to be sure the transfer
    # completed we need the image status on Glance to change to active.
    # The GlanceWriteThread handles that for us.
    elif image_service and image_id:
        write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
                                                 image_service, image_id,
                                                 image_meta)
    # Start the read and write threads.
    read_event = read_thread.start()
    write_event = write_thread.start()
    timer = timeout.Timeout(timeout_secs)
    try:
        # Wait on the read and write events to signal their end
        read_event.wait()
        write_event.wait()
    except (timeout.Timeout, Exception) as exc:
        # If either the read or the write raises an exception, stop both
        # threads so that we don't needlessly keep the other one waiting.
        read_thread.stop()
        write_thread.stop()

        # Log and raise the exception.
        LOG.exception(exc)
        raise exception.CinderException(exc)
    finally:
        timer.cancel()
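The cinder snippet above is the manual form of the guard: create the timer before the blocking waits, handle the timeout alongside ordinary errors, and always cancel the timer in a finally block. Timeout derives from BaseException, which is why the code has to list it explicitly next to Exception. Below is a minimal sketch of the same shape, with a made-up slow_copy() greenthread standing in for the read/write threads and a five-second limit.

import eventlet
from eventlet import timeout

def slow_copy():
    eventlet.sleep(30)          # stand-in for the image transfer

timer = timeout.Timeout(5)
gt = eventlet.spawn(slow_copy)
try:
    gt.wait()                   # blocks until the transfer finishes
except timeout.Timeout:
    gt.kill()                   # stop the worker rather than leave it running
    print("transfer timed out")
finally:
    # Cancel unconditionally so a still-pending timer cannot fire later
    # inside unrelated code.
    timer.cancel()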
github openstack / nova / nova / virt / powervm / operator.py View on Github
        :raises: PowerVMLPARInstanceNotFound
        :raises: PowerVMLPAROperationTimeout
        :raises: InvalidParameterValue
        """
        # make sure it's a valid status
        if (status == constants.POWERVM_NOSTATE or
                status not in constants.POWERVM_POWER_STATE):
            msg = _("Invalid LPAR state: %s") % status
            raise n_exc.InvalidParameterValue(err=msg)

        # raise the given timeout exception if the loop call doesn't complete
        # in the specified timeout
        timeout_exception = exception.PowerVMLPAROperationTimeout(
                                                operation=operation,
                                                instance_name=instance_name)
        with eventlet_timeout.Timeout(timeout, timeout_exception):
            def _wait_for_lpar_status(instance_name, status):
                """Called at an interval until the status is reached."""
                lpar_obj = self.get_lpar(instance_name)
                if lpar_obj['state'] == status:
                    raise loopingcall.LoopingCallDone()

            timer = loopingcall.FixedIntervalLoopingCall(_wait_for_lpar_status,
                                                         instance_name, status)
            timer.start(interval=1).wait()
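The second argument to Timeout replaces the default behaviour: instead of the Timeout instance itself, the supplied exception object is raised in the current greenthread when the timer fires, so callers only ever see a domain-specific error. A minimal sketch with a hypothetical OperationTimeout exception and a three-second limit:

import eventlet
from eventlet import timeout as eventlet_timeout

class OperationTimeout(Exception):       # hypothetical application error
    pass

try:
    with eventlet_timeout.Timeout(3, OperationTimeout("state never reached")):
        eventlet.sleep(10)               # stand-in for the polling loop
except OperationTimeout as exc:
    print("operation failed: %s" % exc)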
github openstack / tacker / tacker / vnfm / infra_drivers / openstack / openstack.py View on Github
            finally:
                sys.path.remove(vnf_package_path)
                LOG.debug('Remove sys.path: %s', sys.path)

            try:
                klass = getattr(module, user_data_class)
            except Exception:
                self._delete_user_data_module(user_data_module)
                error_reason = _(
                    "failed to get UserData class based on "
                    "lcm-operation-user-data-class from additionalParams.")
                raise vnfm.LCMUserDataFailed(reason=error_reason)

            # Set the timeout and execute the UserData script.
            hot_param_dict = None
            with eventlet.timeout.Timeout(USER_DATA_TIMEOUT, False):
                try:
                    hot_param_dict = klass.instantiate(
                        base_hot_dict, vnfd_dict, inst_req_info, grant_info)
                except Exception:
                    raise
                finally:
                    self._delete_user_data_module(user_data_module)

            if hot_param_dict is not None:
                LOG.info('HOT input parameter: %s', hot_param_dict)
            else:
                error_reason = _(
                    "fails due to timeout[sec]: %s") % USER_DATA_TIMEOUT
                raise vnfm.LCMUserDataFailed(reason=error_reason)
            if not isinstance(hot_param_dict, dict):
                error_reason = _(
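Passing False as the second argument puts Timeout into silent mode: when the limit expires the with-block simply exits without raising anything, which is why the code above has to check afterwards whether hot_param_dict was ever assigned. A minimal sketch of that pattern with a made-up five-second limit:

import eventlet
from eventlet.timeout import Timeout

result = None
with Timeout(5, False):          # False: expire silently, raise nothing
    eventlet.sleep(10)           # stand-in for the long-running user-data code
    result = "computed value"

if result is None:
    print("gave up after 5 seconds")
else:
    print("got:", result)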
github eventlet / eventlet / eventlet / db_pool.py View on Github
    def connect(cls, db_module, connect_timeout, *args, **kw):
        t = timeout.Timeout(connect_timeout, ConnectTimeout())
        try:
            return db_module.connect(*args, **kw)
        finally:
            t.cancel()
github tamiel / fshttpstream / src / fshttpstream / fsconnector.py View on Github
    def connect(self):
        self.log.info("fsconnector - connecting to %s %s" % (self.password, str(self.addr)))
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(20.0)
        timer = eventlet.timeout.Timeout(20.0)
        try:
            self.sock.connect(self.addr)
            data = self.sock.recv(1024)
            if not self.__command('auth %s' % self.password):
                self.sock.close()
                self.log.error("fsconnector - auth failure")
                return False
            self.log.info("fsconnector - auth success")
            res = self.__set_filter()
            if res:
                self.log.info("fsconnector - connected")
                self.running = True
                return res
        except eventlet.timeout.Timeout as te:
            self.log.error("fsconnector - handler timeout")
        except socket.timeout as se:
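Here Timeout guards the whole connect-and-authenticate sequence on top of the plain socket timeout. Below is a minimal Python 3 sketch of the same shape, assuming a reachable host and port; it uses eventlet's green socket so the timer can actually interrupt the blocking calls (a Timeout can only fire while the greenthread yields to the hub), and it cancels the timer in a finally block.

from eventlet.green import socket   # cooperative socket that Timeout can interrupt
from eventlet import timeout

addr = ("127.0.0.1", 8021)          # hypothetical event-socket address

timer = timeout.Timeout(20.0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    sock.connect(addr)
    banner = sock.recv(1024)
    print("connected, read %d bytes" % len(banner))
except timeout.Timeout:
    print("connect/handshake timed out")
except OSError as err:              # socket.error and socket.timeout in Python 3
    print("socket error: %s" % err)
finally:
    timer.cancel()
    sock.close()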
github openstack / trove / trove / taskmanager / models.py View on Github
LOG.debug("Begin rolling cluster upgrade for id: %s.", cluster_id)

        def _upgrade_cluster_instance(instance):
            LOG.debug("Upgrading instance with id: %s.", instance.id)
            context.notification = (
                DBaaSInstanceUpgrade(context, **request_info))
            with StartNotification(
                context, instance_id=instance.id,
                datastore_version_id=datastore_version.id):
                with EndNotification(context):
                    instance.update_db(
                        datastore_version_id=datastore_version.id,
                        task_status=InstanceTasks.UPGRADING)
                    instance.upgrade(datastore_version)

        timeout = Timeout(CONF.cluster_usage_timeout)
        cluster_notification = context.notification
        request_info = cluster_notification.serialize(context)
        try:
            instances = []
            for db_inst in DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all():
                instance = BuiltInstanceTasks.load(
                    context, db_inst.id)
                instances.append(instance)

            if ordering_function is not None:
                instances.sort(key=ordering_function)

            for instance in instances:
                _upgrade_cluster_instance(instance)
github openstack / trove / trove / common / strategies / cluster / experimental / cassandra / taskmanager.py View on Github
                    # Now detached, failed nodes will stay available
                    # in the list of standalone instances.
                    return

                # Delete decommissioned instances only when the cluster is in a
                # consistent state.
                LOG.debug("Deleting decommissioned instances.")
                for node in removed_nodes:
                    Instance.delete(node['instance'])

                LOG.debug("Cluster configuration finished successfully.")
            except Exception:
                LOG.exception(_("Error shrinking cluster."))
                self.update_statuses_on_failure(cluster_id)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _shrink_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for shrinking cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("End shrink_cluster for id: %s.", cluster_id)
github tuskar / tuskar / tuskar / openstack / common / rpc / impl_zmq.py View on Github
import eventlet
import greenlet
from oslo.config import cfg

from tuskar.openstack.common import excutils
from tuskar.openstack.common.gettextutils import _
from tuskar.openstack.common import importutils
from tuskar.openstack.common import jsonutils
from tuskar.openstack.common import processutils as utils
from tuskar.openstack.common.rpc import common as rpc_common

zmq = importutils.try_import('eventlet.green.zmq')

# Aliases, for convenience; these are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException

zmq_opts = [
    cfg.StrOpt('rpc_zmq_bind_address', default='*',
               help='ZeroMQ bind address. Should be a wildcard (*), '
                    'an ethernet interface, or IP. '
                    'The "host" option should point or resolve to this '
                    'address.'),

    # The module.Class to use for matchmaking.
    cfg.StrOpt(
        'rpc_zmq_matchmaker',
        default=('tuskar.openstack.common.rpc.'
                 'matchmaker.MatchMakerLocalhost'),