How to use the pyvcloud.vcd.vapp.VApp class in pyvcloud

To help you get started, we’ve selected a few pyvcloud examples based on popular ways the VApp class is used in public projects.
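
VApp is a thin wrapper around a vApp's REST representation: you construct it from a Client plus either an href or an XML resource, typically obtained through VDC.get_vapp(). Here is a minimal sketch of both construction patterns; the host, credentials, and org/VDC/vApp names are placeholders, and error handling is omitted.

from pyvcloud.vcd.client import BasicLoginCredentials, Client
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.vapp import VApp
from pyvcloud.vcd.vdc import VDC

# Placeholder host and credentials; adjust for your vCloud Director instance.
client = Client('vcd.example.com', verify_ssl_certs=False)
client.set_credentials(BasicLoginCredentials('user', 'my-org', 'password'))

# Walk org -> VDC -> vApp resource, then wrap the resource in a VApp object.
org = Org(client, resource=client.get_org())
vdc = VDC(client, resource=org.get_vdc('my-vdc'))
vapp = VApp(client, resource=vdc.get_vapp('my-vapp'))

# Equivalent construction when the href is already known.
vapp_by_href = VApp(client, href=vapp.href)

print(vapp.get_resource().get('name'))
client.logout()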

vmware/pyvcloud: system_tests/vapp_tests.py (view on GitHub)
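This system test moves a vApp between VDCs and back: move_to() is called with the target VDC's href, the returned task is awaited, and the VApp object is rebuilt from the destination VDC before the reverse move. The excerpt begins mid-method, so org and the first target_vdc are populated earlier in the test.
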
        logger = Environment.get_default_logger()
        vapp = Environment.get_vapp_in_test_vdc(
            client=TestVApp._sys_admin_client,
            vapp_name=TestVApp._customized_vapp_name)

        logger.debug('Move vApp ' + TestVApp._customized_vapp_name)
        task = vapp.move_to(target_vdc.get('href'))
        result = TestVApp._sys_admin_client.get_task_monitor(
        ).wait_for_success(task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)

        target_vdc = org.get_vdc(TestVApp._ovdc_name)
        target_vdc_obj = VDC(
            TestVApp._sys_admin_client, href=target_vdc.get('href'))
        vapp_resource = target_vdc_obj.get_vapp(TestVApp._customized_vapp_name)
        vapp = VApp(TestVApp._sys_admin_client, resource=vapp_resource)

        target_vdc = Environment.get_test_vdc(TestVApp._client)
        logger.debug('Move back vApp ' + TestVApp._customized_vapp_name)
        task = vapp.move_to(target_vdc.href)
        result = TestVApp._sys_admin_client.get_task_monitor(
        ).wait_for_success(task)
        self.assertEqual(result.get('status'), TaskStatus.SUCCESS.value)

vmware/vcd-cli: system_tests/vapp_tests.py (view on GitHub)
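This vcd-cli system test powers off and undeploys a vApp built from an href, invokes the upgrade-virtual-hardware and deploy commands through the CLI runner, then reloads the VApp object and powers it back on.
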
    def test_0070_upgrade_virtual_hardware(self):
        vapp_obj = VApp(VAppTest._client, href=VAppTest._test_vapp)
        self._power_off_and_undeploy(vapp_obj)
        result = VAppTest._runner.invoke(
            vapp, args=['upgrade-virtual-hardware', VAppTest._test_vapp_name])
        self.assertEqual(0, result.exit_code)

        result = VAppTest._runner.invoke(
            vapp, args=['deploy', VAppTest._test_vapp_name])
        self.assertEqual(0, result.exit_code)
        vapp_obj.reload()
        task = vapp_obj.power_on()
        VAppTest._client.get_task_monitor().wait_for_success(task)

vmware/pyvcloud: pyvcloud/system_test_framework/environment.py (view on GitHub)
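This test-framework helper shows the canonical construction pattern: fetch the vApp's XML resource from the VDC by name, then wrap it in a VApp object.
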
    @classmethod
    def get_vapp_in_test_vdc(cls, client, vapp_name):
        """Gets the vApp identified by its name in the current org VDC.

        :param pyvcloud.vcd.client.Client client: client which will be used to
            create the VApp object.

        :param str vapp_name: name of the vApp which needs to be retrieved.

        :return: the requested vApp.

        :rtype: pyvcloud.vcd.vapp.VApp
        """
        vdc = cls.get_test_vdc(client)
        vapp_resource = vdc.get_vapp(vapp_name)
        return VApp(client, resource=vapp_resource)

vmware/ansible-module-vcloud-director: modules/vcd_vapp.py (view on GitHub)
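This Ansible module method powers on a vApp idempotently: it returns early with a warning if the vApp is already powered on; otherwise it rebuilds the VApp object from a fresh VDC lookup, runs the power-on task through execute_task(), and downgrades OperationNotSupportedException to a warning.
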
    def power_on(self):
        vapp_name = self.params.get('vapp_name')
        response = dict()
        response['changed'] = False

        vapp = self.get_vapp()

        if vapp.is_powered_on():
            msg = 'Vapp {} is already powered on'
            response['warnings'] = msg.format(vapp_name)
            return response

        try:
            vapp_resource = self.vdc.get_vapp(vapp_name)
            vapp = VApp(self.client, name=vapp_name, resource=vapp_resource)
            power_on_vapp_task = vapp.power_on()
            self.execute_task(power_on_vapp_task)
            msg = 'Vapp {} has been powered on'
            response['msg'] = msg.format(vapp_name)
            response['changed'] = True
        except OperationNotSupportedException:
            msg = 'Operation is not supported. You may have no VM(s) in {}'
            response['warnings'] = msg.format(vapp_name)

        return response

vmware/container-service-extension: container_service_extension/cluster.py (view on GitHub)
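While creating cluster nodes, Container Service Extension resolves the template's catalog item href under a temporary sys admin client, builds the source VApp directly from that href, reads the name of its first VM, and optionally prepares a post-customization script that injects an SSH key.
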
        # Resolve the template's catalog item href using a sys admin
        # client, since tenant clients may not be able to resolve the
        # href on their own.

        sys_admin_client = None
        try:
            sys_admin_client = vcd_utils.get_sys_admin_client()
            org_name = org.get_name()
            org_resource = sys_admin_client.get_org_by_name(org_name)
            org_sa = Org(sys_admin_client, resource=org_resource)
            catalog_item = org_sa.get_catalog_item(
                catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
            catalog_item_href = catalog_item.Entity.get('href')
        finally:
            if sys_admin_client:
                sys_admin_client.logout()

        source_vapp = VApp(client, href=catalog_item_href)
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        cust_script = None
        if ssh_key is not None:
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"

        for n in range(num_nodes):
            ...  # per-node VM creation elided in this excerpt

vmware/vcd-cli: vcd_cli/vapp.py (view on GitHub)
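This vcd-cli command reboots either the whole vApp or, when VM names are given, individual VMs looked up via VApp.get_vm() and wrapped in VM objects.
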
def reboot(ctx, name, vm_names):
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=vdc_href)
        vapp_resource = vdc.get_vapp(name)
        vapp = VApp(client, resource=vapp_resource)
        if len(vm_names) == 0:
            task = vapp.reboot()
            stdout(task, ctx)
        else:
            for vm_name in vm_names:
                vm = VM(client, href=vapp.get_vm(vm_name).get('href'))
                vm.reload()
                task = vm.reboot()
                stdout(task, ctx)
    except Exception as e:
        stderr(e, ctx)

vmware/container-service-extension: container_service_extension/def_/cluster_service.py (view on GitHub)
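CSE deletes cluster nodes in two passes: it first constructs the vApp from its href to locate a master node and run a kubectl delete node script there, then re-fetches the vApp and undeploys each node VM, logging a warning on failure at either step.
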
def _delete_nodes(sysadmin_client: vcd_client.Client, vapp_href, node_names,
                  cluster_name=''):
    vcd_utils.raise_error_if_not_sysadmin(sysadmin_client)

    LOGGER.debug(f"Deleting node(s) {node_names} from cluster '{cluster_name}'"
                 f" (vapp: {vapp_href})")
    script = "#!/usr/bin/env bash\nkubectl delete node "
    for node_name in node_names:
        script += f' {node_name}'
    script += '\n'

    vapp = vcd_vapp.VApp(sysadmin_client, href=vapp_href)
    try:
        master_node_names = get_node_names(vapp, NodeType.MASTER)
        run_script_in_nodes(sysadmin_client, vapp_href, [master_node_names[0]],
                            script)
    except Exception:
        LOGGER.warning(f"Failed to delete node(s) {node_names} from cluster "
                       f"'{cluster_name}' using kubectl (vapp: {vapp_href})")

    vapp = vcd_vapp.VApp(sysadmin_client, href=vapp_href)
    for vm_name in node_names:
        vm = vcd_vm.VM(sysadmin_client, resource=vapp.get_vm(vm_name))
        try:
            task = vm.undeploy()
            sysadmin_client.get_task_monitor().wait_for_status(task)
        except Exception:
            LOGGER.warning(f"Failed to undeploy VM {vm_name} "

vmware/vcd-cli: vcd_cli/vapp.py (view on GitHub)
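The vcd vapp use command resolves a vApp by name in the current VDC and stores its name and href in the session profile so later commands can target it.
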
def use(ctx, name):
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        in_use_org_name = ctx.obj['profiles'].get('org_in_use')
        in_use_vdc_name = ctx.obj['profiles'].get('vdc_in_use')
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=vdc_href)
        vapp_resource = vdc.get_vapp(name)
        vapp = VApp(client, resource=vapp_resource)
        ctx.obj['profiles'].set('vapp_in_use', str(name))
        ctx.obj['profiles'].set('vapp_href', str(vapp.href))
        message = 'now using org: \'%s\', vdc: \'%s\', vApp: \'%s\'.' % \
                  (in_use_org_name, in_use_vdc_name, name)
        stdout({
            'org': in_use_org_name,
            'vdc': in_use_vdc_name,
            'vapp': name
        }, ctx, message)
    except Exception as e:
        stderr(e, ctx)

vmware/container-service-extension: container_service_extension/configure_cse.py (view on GitHub)
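This CSE install routine walks existing clusters, builds a VApp from each cluster's href, and migrates the old 'cse.template' metadata to the newer cse.template.name and cse.template.revision keys, reconstructing the template name from metadata history where it is missing.
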
msg = "Fixing metadata on CSE k8s clusters."
    INSTALL_LOGGER.info(msg)
    msg_update_callback.info(msg)
    if not cse_clusters:
        msg = "No CSE k8s clusters were found."
        INSTALL_LOGGER.info(msg)
        msg_update_callback.info(msg)
        return

    for cluster in cse_clusters:
        msg = f"Processing metadata of cluster '{cluster['name']}'."
        INSTALL_LOGGER.info(msg)
        msg_update_callback.info(msg)

        vapp_href = cluster['vapp_href']
        vapp = VApp(client, href=vapp_href)

        # This step removes the old 'cse.template' metadata and adds
        # cse.template.name and cse.template.revision metadata
        # using hard-coded values taken from github history
        metadata_dict = \
            pyvcloud_vcd_utils.metadata_to_dict(vapp.get_metadata())
        template_name = metadata_dict.get(
            server_constants.ClusterMetadataKey.TEMPLATE_NAME)
        if not template_name:
            msg = "Reconstructing template name and revision for cluster."
            INSTALL_LOGGER.info(msg)
            msg_update_callback.info(msg)

            new_template_name = \
                _construct_template_name_from_history(metadata_dict)