How to use the ansible.constants module in ansible

To help you get started, we’ve selected a few ansible.constants examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github RackHD / RackHD / test / benchmark / utils / ansible_control.py View on Github external
def __get_hosts(self):
        """Prepare an in-memory Ansible setup for the benchmark run.

        Adjusts global ``ansible.constants`` defaults, then builds a dynamic
        INI-style inventory string (templated with the RackHD / test-machine
        connection details).

        NOTE(review): this excerpt is truncated inside the inventory string
        literal; the closing quotes and the rest of the method are not shown.
        """
        # Overwrite remote temp file in case ansible doesn't have rights to the default path
        constants.DEFAULT_REMOTE_TMP = '/tmp'

        # Disable host key checking in case RackHD is not in known_hosts
        constants.HOST_KEY_CHECKING = False

        # Dynamic Inventory
        inventory = """
            [test_machine]
            localhost   ansible_connection=local

            [rackhd]
            {{ rackhd_ip_address }}:{{ rackhd_ssh_port }}

            [test_machine:vars]
            ansible_sudo_pass={{ local_pwd }}

            [rackhd:vars]
github ansible / ansible / lib / ansible / plugins / cache / memcached.py View on Github external
def __init__(self, *args, **kwargs):
        """Set up the memcached-backed cache: server pool, timeout, prefix,
        and the persisted key registry."""
        # Determine which memcached servers to talk to: the configured
        # connection string is a comma-separated list of host:port pairs;
        # when unset, fall back to a local memcached on the default port.
        servers = (C.CACHE_PLUGIN_CONNECTION.split(',')
                   if C.CACHE_PLUGIN_CONNECTION
                   else ['127.0.0.1:11211'])

        self._timeout = C.CACHE_PLUGIN_TIMEOUT
        self._prefix = C.CACHE_PLUGIN_PREFIX
        self._cache = ProxyClientPool(servers, debug=0)
        # Seed the key registry from any list already persisted under the
        # well-known prefix key in memcached.
        existing_keys = self._cache.get(CacheModuleKeys.PREFIX) or []
        self._keys = CacheModuleKeys(self._cache, existing_keys)
github alibaba / ansible-provider-docs / lib / ansible / plugins / connection / ssh.py View on Github external
# Transfer methods to try
        methods = []

        # Use the transfer_method option if set, otherwise use scp_if_ssh
        ssh_transfer_method = self._play_context.ssh_transfer_method
        if ssh_transfer_method is not None:
            # An explicit transfer_method must be one of the known values;
            # 'smart' expands to trying every mechanism in preference order.
            if not (ssh_transfer_method in ('smart', 'sftp', 'scp', 'piped')):
                raise AnsibleOptionsError('transfer_method needs to be one of [smart|sftp|scp|piped]')
            if ssh_transfer_method == 'smart':
                methods = ['sftp', 'scp', 'piped']
            else:
                methods = [ssh_transfer_method]
        else:
            # since this can be a non-bool now, we need to handle it correctly
            scp_if_ssh = C.DEFAULT_SCP_IF_SSH
            if not isinstance(scp_if_ssh, bool):
                # Normalize string settings: boolean-ish strings become real
                # booleans, 'smart' passes through, anything else is an error.
                scp_if_ssh = scp_if_ssh.lower()
                if scp_if_ssh in BOOLEANS:
                    scp_if_ssh = boolean(scp_if_ssh, strict=False)
                elif scp_if_ssh != 'smart':
                    raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
            if scp_if_ssh == 'smart':
                methods = ['sftp', 'scp', 'piped']
            elif scp_if_ssh is True:
                methods = ['scp']
            else:
                methods = ['sftp']

        # Try each candidate mechanism in order.
        # NOTE(review): this excerpt is truncated here; the loop body
        # continues beyond what is shown.
        for method in methods:
            returncode = stdout = stderr = None
            if method == 'sftp':
github ansible / ansible / lib / ansible / executor / task_executor.py View on Github external
def _get_persistent_connection_options(self, connection, variables, templar):
        """Collect and template the variable-driven options that apply to a
        persistent connection plugin (and its sub-plugin, if present)."""
        # Merge the host vars with any vars delegated to this task's target.
        delegated = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict())
        final_vars = combine_vars(variables, delegated)

        # Gather the variable names the connection plugin declares as
        # configurable, plus those of its sub-plugin when one is set.
        option_vars = C.config.get_plugin_vars('connection', connection._load_name)
        plugin = connection._sub_plugin
        if plugin.get('type'):
            option_vars.extend(C.config.get_plugin_vars(plugin['type'], plugin['name']))

        # Template only the options that actually appear in the merged vars.
        return dict(
            (k, templar.template(final_vars[k]))
            for k in option_vars
            if k in final_vars
        )
github ansible / ansible / lib / ansible / executor / playbook_executor.py View on Github external
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
                            self._tqm.clear_failed_hosts()

                        # if the last result wasn't zero or 3 (some hosts were unreachable),
                        # break out of the serial batch loop
                        if result not in (0, 3):
                            break

                    i = i + 1 # per play

                if entry:
                    entrylist.append(entry) # per playbook

                # send the stats callback for this playbook
                if self._tqm is not None:
                    if C.RETRY_FILES_ENABLED:
                        # Collect every host that failed or was unreachable so
                        # a .retry limit file can be written for this playbook.
                        # NOTE(review): dict.keys() + dict.keys() only works on
                        # Python 2; Python 3 views do not support '+' -- confirm
                        # the target interpreter for this code.
                        retries = list(set(self._tqm._failed_hosts.keys() + self._tqm._unreachable_hosts.keys()))
                        retries.sort()
                        if len(retries) > 0:
                            # Retry files go to the configured save path, or
                            # next to the playbook itself when no path is set.
                            if C.RETRY_FILES_SAVE_PATH:
                                basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)
                            else:
                                basedir = os.path.dirname(playbook_path)

                            (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
                            filename = os.path.join(basedir, "%s.retry" % retry_name)
                            if self._generate_retry_inventory(filename, retries):
                                display.display("\tto retry, use: --limit @%s\n" % filename)

                    self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

                # if the last result wasn't zero, break out of the playbook file name loop
github openebs / openebs / e2e / ansible / plugins / callback / openebs.py View on Github external
def v2_runner_item_on_skipped(self, result):
        """Display a 'skipping' line for a skipped loop item.

        The line is shown only when skipped hosts are displayed
        (``C.DISPLAY_SKIPPED_HOSTS``) and either verbosity is raised or the
        result carries the ``_ansible_verbose_always`` flag.

        Bug fix: the original assigned ``msg`` only when the verbose-always
        flag was ABSENT, yet unconditionally called
        ``self._display.display(msg, ...)`` — so a result carrying the flag
        raised ``NameError``. The message is now built in every case the
        display call runs.
        """
        my_result = result._result
        required_result = '_ansible_verbose_always'

        if C.DISPLAY_SKIPPED_HOSTS:
            if self._display.verbosity > 0 or required_result in my_result:
                # Build the message unconditionally so the display call below
                # never sees an unbound ``msg``.
                required_item = self._get_item(my_result)
                dumped_result = self._dump_results(my_result)
                result_host = result._host.get_name()
                msg = "skipping: [%s] => (item=%s) => %s" % (result_host,
                                                             required_item,
                                                             dumped_result)
                self._display.display(msg, color=C.COLOR_SKIP)
github ansible / ansible / lib / ansible / cli / vault.py View on Github external
def run(self):
        """Entry point for the vault CLI: resolve the vault identities to use
        and prompt for whatever secrets the requested action needs.

        NOTE(review): this excerpt is truncated partway through the
        encrypt/encrypt_string/create branch.
        """
        super(VaultCLI, self).run()
        loader = DataLoader()

        # set default restrictive umask
        old_umask = os.umask(0o077)

        vault_ids = list(context.CLIARGS['vault_ids'])

        # there are 3 types of actions, those that just 'read' (decrypt, view) and only
        # need to ask for a password once, and those that 'write' (create, encrypt) that
        # ask for a new password and confirm it, and 'read/write (rekey) that asks for the
        # old password, then asks for a new one and confirms it.

        # Configured default identities come first; CLI-supplied ids are appended.
        default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
        vault_ids = default_vault_ids + vault_ids

        action = context.CLIARGS['action']

        # TODO: instead of prompting for these before, we could let VaultEditor
        #       call a callback when it needs it.
        if action in ['decrypt', 'view', 'rekey', 'edit']:
            # 'read' actions need an existing vault password up front.
            vault_secrets = self.setup_vault_secrets(loader, vault_ids=vault_ids,
                                                     vault_password_files=list(context.CLIARGS['vault_password_files']),
                                                     ask_vault_pass=context.CLIARGS['ask_vault_pass'])
            if not vault_secrets:
                raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")

        if action in ['encrypt', 'encrypt_string', 'create']:

            encrypt_vault_id = None
github ansible / ansible / lib / ansible / vars / clean.py View on Github external
remove_keys.update(fact_keys.intersection(C.MAGIC_VARIABLE_MAPPING[magic_var]))

    # remove common connection vars
    remove_keys.update(fact_keys.intersection(C.COMMON_CONNECTION_VARS))

    # next we remove any connection plugin specific vars
    for conn_path in connection_loader.all(path_only=True):
        conn_name = os.path.splitext(os.path.basename(conn_path))[0]
        re_key = re.compile('^ansible_%s_' % conn_name)
        for fact_key in fact_keys:
            # most lightweight VM or container tech creates devices with this pattern, this avoids filtering them out
            if (re_key.match(fact_key) and not fact_key.endswith(('_bridge', '_gwbridge'))) or fact_key.startswith('ansible_become_'):
                remove_keys.add(fact_key)

    # remove some KNOWN keys
    for hard in C.RESTRICTED_RESULT_KEYS + C.INTERNAL_RESULT_KEYS:
        if hard in fact_keys:
            remove_keys.add(hard)

    # finally, we search for interpreter keys to remove
    re_interp = re.compile('^ansible_.*_interpreter$')
    for fact_key in fact_keys:
        if re_interp.match(fact_key):
            remove_keys.add(fact_key)
    # then we remove them (except for ssh host keys)
    for r_key in remove_keys:
        if not r_key.startswith('ansible_ssh_host_key_'):
            try:
                # Truncate long values, presumably for a log/debug message.
                # NOTE(review): this excerpt is truncated inside this
                # try/except; the except body is not shown.
                r_val = to_text(data[r_key])
                if len(r_val) > 24:
                    r_val = '%s ... %s' % (r_val[:13], r_val[-6:])
            except Exception:
github ansible / ansible / lib / ansible / plugins / action / net_base.py View on Github external
if self.provider.get('transport') == 'netconf' and play_context.network_os in _NETCONF_SUPPORTED_PLATFORMS \
                    and self._task.action not in _CLI_ONLY_MODULES:
                # netconf transport: default port 830
                play_context.connection = 'netconf'
                play_context.port = int(self.provider['port'] or self._play_context.port or 830)
            elif self.provider.get('transport') in ('nxapi', 'eapi') and play_context.network_os in ('nxos', 'eos'):
                # HTTP(S) API transports are driven through the local connection
                play_context.connection = 'local'
                play_context.port = int(self.provider['port'] or self._play_context.port or 22)
            else:
                # everything else falls back to network_cli (SSH, port 22)
                play_context.connection = 'network_cli'
                play_context.port = int(self.provider['port'] or self._play_context.port or 22)

            # provider values take precedence; the play context is the fallback
            play_context.remote_addr = self.provider['host'] or self._play_context.remote_addr
            play_context.remote_user = self.provider['username'] or self._play_context.connection_user
            play_context.password = self.provider['password'] or self._play_context.password
            play_context.private_key_file = self.provider['ssh_keyfile'] or self._play_context.private_key_file
            play_context.timeout = int(self.provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
            if 'authorize' in self.provider.keys():
                # 'authorize' maps to privilege escalation via the 'enable' method
                play_context.become = self.provider['authorize'] or False
                play_context.become_pass = self.provider['auth_pass']
                play_context.become_method = 'enable'

            if self._play_context.connection == 'local':
                if self.provider.get('transport') == 'nxapi' and play_context.network_os == 'nxos':
                    self._task.args['provider'] = _NxosActionModule.nxapi_implementation(self.provider, self._play_context)
                elif self.provider.get('transport') == 'eapi' and play_context.network_os == 'eos':
                    self._task.args['provider'] = _EosActionModule.eapi_implementation(self.provider, self._play_context)
                else:
                    # otherwise open a persistent connection and pass its
                    # socket path to the module through the task vars
                    socket_path = self._start_connection(play_context)
                    task_vars['ansible_socket'] = socket_path

        else:
            # NOTE(review): this excerpt is truncated just after this line.
            provider = self._task.args.get('provider', {})
github ansible / ansible / lib / ansible / runner / action_plugins / patch.py View on Github external
if self.runner.become and self.runner.become_user != 'root':
            # the become user needs read access to the uploaded patch source
            if not self.runner.noop_on_check(inject):
                self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)

        new_module_args = dict(
            src=tmp_src,
        )

        # propagate check mode to the remote module
        if self.runner.noop_on_check(inject):
            new_module_args['CHECKMODE'] = True

        module_args = utils.merge_module_args(module_args, new_module_args)

        data = self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)
        # clean up the remote temp path unless the user asked to keep remote files
        if not C.DEFAULT_KEEP_REMOTE_FILES:
            self.runner._remove_tmp_path(conn, tmp_path)

        return data