How to use the hpccm.building_blocks.packages.packages function in hpccm

To help you get started, we’ve selected a few hpccm examples based on popular ways this function is used in public projects.

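The `packages` building block is distribution-aware: based on the Linux distribution of the base image it emits either apt-get or yum instructions, so one recipe can serve both Debian- and RPM-based images. A minimal sketch of standalone use (package names are illustrative, not taken from the projects below):

```python
import hpccm
from hpccm.building_blocks.packages import packages
from hpccm.primitives import baseimage

# render Dockerfile syntax; container_type.SINGULARITY works the same way
hpccm.config.g_ctype = hpccm.container_type.DOCKER

stage = hpccm.Stage()
stage += baseimage(image='ubuntu:20.04')
# 'ospackages' is distribution-neutral: on this Ubuntu base it becomes an
# apt-get install step; on a CentOS base the same call would use yum
stage += packages(ospackages=['bzip2', 'wget'])
print(stage)
```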

github NVIDIA / hpc-container-maker / hpccm / building_blocks / openmpi.py
        self.__bb = generic_autotools(
            # ... (arguments preceding this excerpt elided) ...
            toolchain=self.__toolchain,
            **kwargs)

        # Container instructions
        if self.repository:
            if self.branch:
                self += comment('OpenMPI {} {}'.format(self.repository,
                                                       self.branch))
            elif self.commit:
                self += comment('OpenMPI {} {}'.format(self.repository,
                                                       self.commit))
            else:
                self += comment('OpenMPI {}'.format(self.repository))
        else:
            self += comment('OpenMPI version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb

github NVIDIA / hpc-container-maker / hpccm / building_blocks / nsight_systems.py
def __instructions(self):
        """Fill in container instructions"""

        self += comment('NVIDIA Nsight Systems {}'.format(self.__version))

        if self.__ospackages:
            self += packages(ospackages=self.__ospackages)

        if self.__cli:
            package = 'nsight-systems-cli-{}'.format(self.__version)
        else:
            package = 'nsight-systems-{}'.format(self.__version)

        self += packages(
            apt_keys=['https://developer.download.nvidia.com/compute/cuda/repos/{0}/{1}/7fa2af80.pub'.format(self.__distro_label, self.__arch_key_label)],
            apt_repositories=['deb https://developer.download.nvidia.com/devtools/repo-deb/{}/ /'.format(self.__arch_repo_label)],
            ospackages=[package],
            yum_keys=['https://developer.download.nvidia.com/compute/cuda/repos/{0}/{1}/7fa2af80.pub'.format(self.__distro_label, self.__arch_key_label)],
            yum_repositories=['https://developer.download.nvidia.com/devtools/repo-rpm/{}'.format(self.__arch_repo_label)])
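The Nsight Systems excerpt above shows the usual pattern for third-party repositories: paired apt_* and yum_* arguments in a single packages() call, with the base image deciding which pair is used. A hedged sketch of the same pattern with placeholder URLs and package names:

```python
import hpccm
from hpccm.building_blocks.packages import packages
from hpccm.primitives import baseimage

hpccm.config.g_ctype = hpccm.container_type.DOCKER

stage = hpccm.Stage()
stage += baseimage(image='ubuntu:20.04')  # determines apt_* vs yum_* below
stage += packages(
    apt_keys=['https://example.com/repo/key.pub'],                 # placeholder
    apt_repositories=['deb https://example.com/apt stable main'],  # placeholder
    yum_keys=['https://example.com/repo/key.pub'],                 # placeholder
    yum_repositories=['https://example.com/repo/example.repo'],    # placeholder
    ospackages=['example-tool'])                                   # placeholder
print(stage)
```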

github NVIDIA / hpc-container-maker / hpccm / building_blocks / gnu.py
def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        g = gnu(...)
        Stage0 += g
        Stage1 += g.runtime()
        ```
        """
        self.rt += comment('GNU compiler runtime')
        if self.__source:
            self.rt += copy(_from=_from,
                            dest=posixpath.join(self.prefix, 'lib64'),
                            src=posixpath.join(self.prefix, 'lib64'))
            if self.ldconfig:
                self.rt += shell(commands=[self.ldcache_step(
                    directory=posixpath.join(self.prefix, 'lib64'))])
            else:
                self.rt += environment(variables=self.environment_step(
                    include_only=['LD_LIBRARY_PATH']))
        else:
            self.rt += packages(
                apt=self.__runtime_debs,
                apt_ppas=self.__extra_repo_apt,
                release_stream=bool(self.__version), # True / False
                scl=bool(self.__version), # True / False
                yum=self.__runtime_rpms)

        return str(self.rt)
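In the GNU excerpt, the scl and release_stream flags only matter on RPM-based images: when a specific compiler version was requested, its runtime packages live in the Software Collections (CentOS 7) or release stream (CentOS 8) repositories, which packages() must enable first. A minimal sketch, assuming a CentOS 7 base image and illustrative package names:

```python
import hpccm
from hpccm.building_blocks.packages import packages
from hpccm.primitives import baseimage

hpccm.config.g_ctype = hpccm.container_type.DOCKER

stage = hpccm.Stage()
stage += baseimage(image='centos:7')
# scl=True enables the Software Collections repository that hosts
# versioned toolchain runtimes; the apt list would be used on Ubuntu
stage += packages(apt=['libgfortran5', 'libgomp1'],
                  yum=['devtoolset-9-runtime'],
                  scl=True)
print(stage)
```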

github NVIDIA / hpc-container-maker / hpccm / building_blocks / llvm.py
def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        l = llvm(...)
        Stage0 += l
        Stage1 += l.runtime()
        ```
        """
        self.rt += comment('LLVM compiler runtime')
        if self.__runtime_ospackages:
            self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += packages(apt=self.__runtime_debs,
                            apt_keys=self.__apt_keys,
                            apt_repositories=self.__apt_repositories,
                            scl=bool(self.__version), # True / False
                            yum=self.__runtime_rpms)
        return str(self.rt)

github NVIDIA / hpc-container-maker / hpccm / building_blocks / nv_hpc_sdk.py
def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        n = nv_hpc_sdk(...)
        Stage0 += n
        Stage1 += n.runtime()
        ```
        """
        self.rt += comment('NVIDIA HPC SDK')

        if self.__runtime_ospackages:
            self.rt += packages(ospackages=self.__runtime_ospackages)

        self.rt += copy(_from=_from,
                        src=posixpath.join(self.__basepath, 'REDIST', '*.so*'),
                        dest=posixpath.join(self.__basepath, 'lib', ''))

        if self.__mpi:
            self.rt += copy(_from=_from, src=self.__mpipath,
                            dest=self.__mpipath)

        self.rt += environment(variables=self.environment_step(runtime=True))

        return str(self.rt)

github NVIDIA / hpc-container-maker / hpccm / building_blocks / intel_psxe.py
def __instructions(self):
        """Fill in container instructions"""

        self += comment('Intel Parallel Studio XE')
        self += packages(ospackages=self.__ospackages)
        self += copy(src=self.__tarball,
                     dest=posixpath.join(self.__wd, self.__tarball_name))
        if self.__license and '@' not in self.__license:
            # License file
            self += copy(src=self.__license,
                         dest=posixpath.join(self.__wd, 'license.lic'))
        self += shell(commands=self.__commands)

        if self.__psxevars:
            # Source the compilervars environment script when starting the
            # container, but the variables will not be available to any
            # subsequent build steps.
            self += shell(commands=['echo "source {0}/compilers_and_libraries/linux/bin/compilervars.sh intel64" >> {1}'.format(self.__prefix, self.__bashrc)])
        else:
            self += environment(variables=self.environment_step())

github NVIDIA / hpc-container-maker / hpccm / building_blocks / ofed.py
def __instructions(self):
        """Fill in container instructions"""
        self += comment('OFED')
        if self.__prefix:
            commands = []

            # Extract to a prefix - not a "real" package manager install
            self += packages(ospackages=self.__deppackages)
            self += packages(download=True, extra_opts=self.__extra_opts,
                             extract=self.__prefix,
                             ospackages=self.__ospackages,
                             powertools=self.__powertools)

            # library symlinks
            if self.__symlink:
                self.__deppackages.append('findutils')

                commands.append('mkdir -p {0} && cd {0}'.format(
                    posixpath.join(self.__prefix, 'lib')))
                # Prune the symlink directory itself and any debug
                # libraries
                commands.append('find .. -path ../lib -prune -o -name "*valgrind*" -prune -o -name "lib*.so*" -exec ln -s {} \\;')
                commands.append('cd {0} && ln -s usr/bin bin && ln -s usr/include include'.format(
                    self.__prefix))
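The OFED excerpt relies on a less common mode of packages(): with download=True the packages are only fetched, and extract unpacks their contents under a prefix instead of installing them system-wide. A hedged sketch with an assumed prefix and package name:

```python
import hpccm
from hpccm.building_blocks.packages import packages
from hpccm.primitives import baseimage

hpccm.config.g_ctype = hpccm.container_type.DOCKER

stage = hpccm.Stage()
stage += baseimage(image='ubuntu:20.04')
# fetch the package files and unpack them under /usr/local/example
# instead of installing them with the package manager
stage += packages(download=True,
                  extract='/usr/local/example',  # assumed prefix
                  ospackages=['libibverbs1'])    # assumed package
print(stage)
```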

github NVIDIA / hpc-container-maker / hpccm / building_blocks / openblas.py
        self.__bb = generic_build(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            build=['make {}'.format(' '.join(self.__make_opts))],
            comment=False,
            directory='OpenBLAS-{}'.format(self.__version),
            devel_environment=self.environment_variables,
            install=['make install PREFIX={}'.format(self.__prefix)],
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            url='{0}/v{1}.tar.gz'.format(self.__baseurl, self.__version),
            **kwargs)

        # Container instructions
        self += comment('OpenBLAS version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb
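The OpenBLAS excerpt illustrates a common division of labor: packages() installs the toolchain prerequisites, then a generic_build block compiles the source. A hedged sketch of the same pattern with a placeholder project:

```python
import hpccm
from hpccm.building_blocks.generic_build import generic_build
from hpccm.building_blocks.packages import packages
from hpccm.primitives import baseimage

hpccm.config.g_ctype = hpccm.container_type.DOCKER

stage = hpccm.Stage()
stage += baseimage(image='ubuntu:20.04')
# build prerequisites via the system package manager
stage += packages(ospackages=['make', 'perl', 'tar', 'wget'])
# then build from source (URL and prefix are placeholders)
stage += generic_build(
    build=['make'],
    install=['make install PREFIX=/usr/local/example'],
    prefix='/usr/local/example',
    url='https://example.com/example-1.0.tar.gz')
print(stage)
```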

github NVIDIA / hpc-container-maker / hpccm / building_blocks / mvapich2_gdr.py
def __instructions(self):
        """Fill in container instructions"""

        self += comment('MVAPICH2-GDR version {}'.format(self.version))
        self += packages(ospackages=self.__ospackages)
        self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())

github NVIDIA / hpc-container-maker / hpccm / building_blocks / intel_mpi.py
def __instructions(self):
        """Fill in container instructions"""

        self += comment('Intel MPI version {}'.format(self.__version))

        if self.__ospackages:
            self += packages(ospackages=self.__ospackages)

        if not self.__eula:
            raise RuntimeError('Intel EULA was not accepted.  To accept, see the documentation for this building block')

        self += packages(
            apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-{}.PUB'.format(self.__year)],
            apt_repositories=['deb https://apt.repos.intel.com/mpi all main'],
            ospackages=['intel-mpi-{}'.format(self.__version)],
            yum_keys=['https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-{}.PUB'.format(self.__year)],
            yum_repositories=['https://yum.repos.intel.com/mpi/setup/intel-mpi.repo'])

        # Set the environment
        if self.__mpivars:
            # Source the mpivars environment script when starting the
            # container, but the variables will not be available to any
            # subsequent build steps.
            self += shell(commands=['echo "source /opt/intel/compilers_and_libraries/linux/mpi/intel64/bin/mpivars.sh intel64" >> {}'.format(self.__bashrc)])
        else:
            # Set the environment so that it will be available to
            # subsequent build steps and when starting the container,
            # but this may miss some things relative to the mpivars
            # environment script.
            self += environment(variables=self.environment_step())
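Putting the pieces together, a hedged end-to-end sketch of the two-stage pattern that the runtime() methods above are designed for (image names and versions are assumptions):

```python
import hpccm
from hpccm.building_blocks.gnu import gnu
from hpccm.building_blocks.openmpi import openmpi
from hpccm.building_blocks.packages import packages
from hpccm.primitives import baseimage

hpccm.config.g_ctype = hpccm.container_type.DOCKER

# build stage: compilers, MPI, and any extra OS packages
Stage0 = hpccm.Stage()
Stage0 += baseimage(image='ubuntu:20.04', _as='build')
Stage0 += packages(ospackages=['ca-certificates', 'wget'])
g = gnu()
Stage0 += g
mpi = openmpi(version='4.1.4', toolchain=g.toolchain)
Stage0 += mpi

# runtime stage: only the runtime components are installed or copied over
Stage1 = hpccm.Stage()
Stage1 += baseimage(image='ubuntu:20.04')
Stage1 += g.runtime(_from='build')
Stage1 += mpi.runtime(_from='build')

print(Stage0)
print(Stage1)
```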