Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Ask the ``openssl`` command-line tool to verify ``certificatefile``
# against the CA certificate in ``cafile``.
command = [b"openssl", b"verify", b"-CAfile", cafile, certificatefile]
try:
result = run_process(command, **kwargs)
# ``openssl verify`` prints "<path>: OK" on success.
# NOTE(review): ``bytes.format`` only exists on Python 2 ``str``; on
# Python 3 this line raises AttributeError -- confirm the supported
# interpreter version.
return result.output.strip() == b"{}: OK".format(certificatefile)
except CalledProcessError as e:
# Verification failed: dump the text form of both certificates into
# the log so the mismatch can be diagnosed after the fact.
result = run_process([
"openssl", "x509", "-text", "-in", cafile], **kwargs)
cafile_info = result.output
result = run_process([
"openssl", "x509", "-text", "-in", certificatefile], **kwargs)
certificate_info = result.output
error = str(e)
error = error + "\n" + cafile_info + "\n" + certificate_info
Message.new(
message_type="flocker.ca.functional:openssl_verify_error",
error=error).write(Logger())
return False
def mysql_can_connect():
    """
    Attempt a single MySQL connection using the connection parameters
    captured from the enclosing scope.

    :return: The open connection on success, or ``False`` when the
        attempt raises a MySQL ``Error`` (the failure is logged first).
    """
    try:
        return connect(
            host=host,
            port=port,
            user=user,
            passwd=passwd,
            db=db,
        )
    except Error as exception:
        # Record the failure so repeated polling is visible in the logs.
        failure = Message.new(
            message_type="acceptance:mysql_connect_error",
            error=str(exception),
        )
        failure.write(Logger())
        return False
dl = loop_until(mysql_can_connect)
@implementer(IStoragePool)
@with_repr(["_name"])
@with_cmp(["_name", "_mount_root"])
class StoragePool(Service):
"""
A ZFS storage pool.
Remotely owned filesystems are mounted read-only to prevent changes
(divergence which would break ``zfs recv``). This is done by having the
root dataset be ``readonly=on`` - which is inherited by all child datasets.
Locally owned datasets have this overridden with an explicit
``readonly=off`` property set on them.
"""
logger = Logger()
def __init__(self, reactor, name, mount_root):
"""
:param reactor: A ``IReactorProcess`` provider.
:param bytes name: The pool's name.
:param FilePath mount_root: Directory where filesystems should be
mounted.
"""
# Only record the collaborators here; no zfs interaction happens
# until the service is started.
self._reactor = reactor
self._name = name
self._mount_root = mount_root
def startService(self):
"""
Make sure that the necessary properties are set on the root Flocker zfs
storage pool.
from twisted.internet.defer import gatherResults
from . import IStateChange, in_parallel, sequentially
from ..control._model import (
DatasetChanges, DatasetHandoff, NodeState, Manifestation, Dataset,
ip_to_uuid,
)
from ..volume._ipc import RemoteVolumeManager, standard_node
from ..volume._model import VolumeSize
from ..volume.service import VolumeName
from ._deploy import IDeployer, NodeLocalState, NotInUseDatasets
_logger = Logger()
def _to_volume_name(dataset_id):
    """
    Wrap a dataset ID in a ``VolumeName`` under the ``u"default"``
    namespace.

    To be replaced in https://clusterhq.atlassian.net/browse/FLOC-737 with
    real namespace support.

    :param unicode dataset_id: Dataset ID.
    :return: ``VolumeName`` with default namespace.
    """
    return VolumeName(dataset_id=dataset_id, namespace=u"default")
"""
Cross-process log tracing: HTTP server.
"""
from __future__ import unicode_literals
import sys
from flask import Flask, request
from eliot import Logger, to_file, Action, start_action, add_global_fields
# Tag every Eliot message from this process so client and server logs can
# be told apart once the traces are merged.
add_global_fields(process="server")
# Send Eliot's log output to stdout.
to_file(sys.stdout)
logger = Logger()
app = Flask("server")
def divide(x, y):
    """
    Divide ``x`` by ``y`` inside an Eliot "divide" action, logging the
    operands as action fields and the quotient as a success field.

    :raise ZeroDivisionError: If ``y`` is zero; the surrounding action is
        then recorded as failed by the context manager.
    """
    with start_action(logger, "divide", x=x, y=y) as action:
        quotient = x / y
        action.add_success_fields(result=quotient)
        return quotient
@app.route("/")
def main():
with Action.continue_task(logger, request.headers["x-eliot-task-id"]):
x = int(request.args["x"])
from eliot import Message, Logger, start_action
from twisted.internet.defer import succeed
from . import IStateChange, in_parallel, sequentially
from ._docker import DockerClient, PortMap, Environment, Volume as DockerVolume
from ..control._model import (
Application, AttachedVolume, NodeState, DockerImage, Port, Link,
RestartNever, ip_to_uuid,
)
from ._deploy import IDeployer, NodeLocalState
# Module-level Eliot logger shared by the definitions below.
_logger = Logger()
# Delay to sleep when there is nothing to do -- presumably consumed by the
# deployer's convergence loop; confirm at the call site.
NOOP_SLEEP_TIME = timedelta(seconds=5)
def _eliot_system(part):
return u"flocker:node:container_deployer:" + part
@implementer(IStateChange)
class StartApplication(PClass):
"""
Launch the supplied application as a container.
:ivar Application application: The ``Application`` to create and
start.
"""
# NOTE(review): these fields do not match the docstring above -- they
# look like they belong to a different record (an update-tracking class
# such as ``_UpdateState``); confirm against the original module.
response = field()
next_scheduled = field()
class ControlAMPService(Service):
"""
Control Service AMP server.
Convergence agents connect to this server.
:ivar dict _current_command: A dictionary containing information about
connections to which state updates are currently in progress. The keys
are protocol instances. The values are ``_UpdateState`` instances.
"""
logger = Logger()
def __init__(self, reactor, cluster_state, configuration_service, endpoint,
context_factory):
"""
:param reactor: See ``ControlServiceLocator.__init__``.
:param ClusterStateService cluster_state: Object that records known
cluster state.
:param ConfigurationPersistenceService configuration_service:
Persistence service for desired cluster configuration.
:param endpoint: Endpoint to listen on.
:param context_factory: TLS context factory.
"""
# Connected agent protocol instances.
self.connections = set()
# In-flight state updates, keyed by protocol instance (see class
# docstring).
self._current_command = {}
self.cluster_state = cluster_state
self.configuration_service = configuration_service
# NOTE(review): ``endpoint`` and ``context_factory`` are not stored in
# the lines visible here -- presumably used further down in this
# (possibly truncated) initializer; confirm in the full source.
class DeviceVersionMismatch(DeviceException):
"""
Raised when the device reports a version that this driver does not
support.
"""
class DeviceExceptionObjNotFound(Exception):
    """
    Raised when the requested object cannot be found on the device.
    """
# Eliot is transitioning away from the "Logger instances all over the place"
# approach. And it's hard to put Logger instances on PRecord subclasses which
# we have a lot of. So just use this global logger for now.
_logger = Logger()
class XtremIOMgmt():
"""
EMC XtremIO exposes its management interface through XMS. This class
abstracts the REST calls used by the iSCSI class and the main driver
class.
"""
# HTTP verbs used when calling the XMS REST API.
GET = "GET"
POST = "POST"
DELETE = "DELETE"
# Presumably the folder name grouping Flocker-managed volumes and the
# REST resource / response key it lives under -- confirm against the
# XMS API usage elsewhere in this class.
VOL_FLOCKER = "VOL_FLOCKER"
VOLUME_FOLDERS = "volume-folders"
CAPTION = 'caption'
# The leases in the configuration are out of date.
new_config = config.set("leases", new_leases)
d = persistence_service.save(new_config)
d.addCallback(lambda _: new_config.leases)
return d
return succeed(new_leases)
class ConfigurationPersistenceService(MultiService):
"""
Persist configuration to disk, and load it back.
:ivar Deployment _deployment: The current desired deployment configuration.
:ivar bytes _hash: A SHA256 hash of the configuration.
"""
logger = Logger()
def __init__(self, reactor, path):
"""
:param reactor: Reactor to use for thread pool.
:param FilePath path: Directory where desired deployment will be
persisted.
"""
MultiService.__init__(self)
self._path = path
# Single JSON document holding the current configuration.
self._config_path = self._path.child(b"current_configuration.json")
# Presumably callables invoked when the configuration changes --
# confirm where they are fired in the full source.
self._change_callbacks = []
# Lease handling runs as a child service of this one.
LeaseService(reactor, self).setServiceParent(self)
def startService(self):
# Create the persistence directory on first run.
if not self._path.exists():
self._path.makedirs()
from twisted.python.filepath import FilePath
from twisted.python.components import proxyForInterface
from .. import (
IDeployer, IStateChange, sequentially, in_parallel, run_state_change
)
from .._deploy import NotInUseDatasets
from ...control import NodeState, Manifestation, Dataset, NonManifestDatasets
from ...common import auto_threaded
# Eliot is transitioning away from the "Logger instances all over the place"
# approach. And it's hard to put Logger instances on PRecord subclasses which
# we have a lot of. So just use this global logger for now.
_logger = Logger()
# The size which will be assigned to datasets with an unspecified
# maximum_size.
# XXX: Make this configurable. FLOC-2044
# 100 GiB expressed as an integer number of bytes.
DEFAULT_DATASET_SIZE = int(GiB(100).to_Byte().value)
# ``@attributes`` presumably generates a constructor accepting
# ``dataset_id`` and exposes it as an instance attribute -- confirm
# against the decorator's definition.
@attributes(["dataset_id"])
class DatasetWithoutVolume(Exception):
"""
An operation was attempted on a dataset that involves manipulating the
dataset's volume but that volume could not be found.
:ivar UUID dataset_id: The unique identifier of the dataset the operation
was meant to affect.
"""