How to use the psutil.cpu_count function in psutil

To help you get started, we’ve selected a few psutil examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github giampaolo / psutil / test / _windows.py View on Github external
def test_cpu_count(self):
        # On Windows the NUMBER_OF_PROCESSORS environment variable holds the
        # logical CPU count; psutil.cpu_count() must report the same value.
        expected = int(os.environ['NUMBER_OF_PROCESSORS'])
        self.assertEqual(expected, psutil.cpu_count())
github opesci / devito / devito / archinfo.py View on Github external
get = lambda k: [i for i in lines if i.startswith(k)][0].split(':')[1].strip()
            cpu_info['flags'] = get('flags').split()
            cpu_info['brand'] = get('model name')
        except IndexError:
            # The /proc/cpuinfo format doesn't follow a standard, and on some
            # more or less exotic combinations of OS and platform it might not
            # be what we expect, hence ending up here
            pass
    if not all(i in cpu_info for i in ('flags', 'brand')):
        # Fallback
        ci = cpuinfo.get_cpu_info()
        cpu_info['flags'] = ci.get('flags')
        cpu_info['brand'] = ci.get('brand')

    # Detect number of logical cores
    logical = psutil.cpu_count(logical=True)
    if not logical:
        # Never bumped into a platform that make us end up here, yet
        # But we try to cover this case anyway, with `lscpu`
        try:
            logical = lscpu()['CPU(s)']
        except KeyError:
            warning("Logical core count autodetection failed")
            logical = 1
    cpu_info['logical'] = logical

    # Detect number of physical cores
    # TODO: on multi-socket systems + unix, can't use psutil due to
    # `https://github.com/giampaolo/psutil/issues/1558`
    mapper = {}
    if lines:
        # Copied and readapted from psutil
github cyberbotics / webots / resources / web / server / simulation_server.py View on Github external
global gpu_load_compute
        global gpu_load_memory
        memory = psutil.virtual_memory()
        swap = psutil.swap_memory()
        if nvidia:
            nvmlHandle = nvmlDeviceGetHandleByIndex(0)
            gpu = str(nvmlDeviceGetName(nvmlHandle))
            gpu_memory = nvmlDeviceGetMemoryInfo(nvmlHandle)
            gpu_ram = gpu_memory.total / (1024 * 1048576)
            gpu += " - " + str(gpu_ram) + "GB"
        else:
            gpu = "Not recognized"
        ram = str(int(round(float(memory.total) / (1024 * 1048576)))) + "GB"
        ram += " (swap: " + str(int(round(float(swap.total) / (1024 * 1048576)))) + "GB)"
        real_cores = psutil.cpu_count(False)
        cores_ratio = psutil.cpu_count(True) / real_cores
        cores = " (" + str(cores_ratio) + "x " + str(real_cores) + " cores)"
        if sys.platform.startswith('linux'):
            distribution = platform.linux_distribution()
            os_name = 'Linux ' + distribution[0] + " " + distribution[1] + " " + distribution[2]
            command = "cat /proc/cpuinfo"
            all_info = str(subprocess.check_output(command, shell=True).strip())
            for line in all_info.split("\n"):
                if "model name" in line:
                    cpu = re.sub(".*model name.*:", "", line, 1)
                    break
        elif sys.platform == 'win32':
            computer = wmi.WMI()
            os_info = computer.Win32_OperatingSystem()[0]
            cpu = computer.Win32_Processor()[0].Name
            os_name = os_info.Name.encode('utf-8').split('|')[0] + ", version "
            os_name += os_info.Version
github DingGuodong / LinuxBashShellScriptForOps / projects / performances / Linux / getLoadAverage.py View on Github external
uptime_minutes = int(uptime_total_seconds / 60 % 60)
uptime_seconds = int(uptime_total_seconds % 60)
print "uptime: %d days %d hours %d minutes %d seconds" % (uptime_days, uptime_hours, uptime_minutes, uptime_seconds)

user_number = len(psutil.users())
print "%d user:" % user_number
print "\t\\"
for user_tuple in psutil.users():
    user_name = user_tuple[0]
    user_terminal = user_tuple[1]
    user_host = user_tuple[2]
    user_login_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(user_tuple[3]))
    print "\t|- user online: %s, login from %s with terminal %s at %s" % (
        user_name, user_host, user_terminal, user_login_time)

cpu_count = psutil.cpu_count()
try:
    with open('/proc/loadavg', 'r') as f:
        loadavg_c = f.read().split(' ')
        loadavg = dict()
        if loadavg_c is not None:
            loadavg['loadavg_1'] = loadavg_c[0]
            loadavg['loadavg_5'] = loadavg_c[1]
            loadavg['loadavg_15'] = loadavg_c[2]
            loadavg['nr'] = loadavg_c[3]
            loadavg['last_pid'] = loadavg_c[4]
    print "load average: %s, %s, %s" % (loadavg['loadavg_1'], loadavg['loadavg_5'], loadavg['loadavg_15'])
    if float(loadavg['loadavg_15']) > cpu_count:
        print "Note: cpu 15 min load is high!"
    if float(loadavg['loadavg_5']) > cpu_count:
        print "Note: cpu 5 min load is high!"
    if float(loadavg['loadavg_1']) > cpu_count:
github GNS3 / gns3-server / gns3server / controller / gns3vm / vmware_gns3_vm.py View on Github external
async def _set_vcpus_ram(self, vcpus, ram):
        """
        Set the number of vCPU cores and amount of RAM for the GNS3 VM.

        :param vcpus: number of vCPU cores
        :param ram: amount of RAM
        """

        # memory must be a multiple of 4 (VMware requirement)
        if ram % 4 != 0:
            raise GNS3VMError("Allocated memory {} for the GNS3 VM must be a multiple of 4".format(ram))

        available_vcpus = psutil.cpu_count(logical=True)
        if not float(vcpus).is_integer():
            raise GNS3VMError("The allocated vCPUs value is not an integer: {}".format(vcpus))
        if vcpus > available_vcpus:
            raise GNS3VMError("You have allocated too many vCPUs for the GNS3 VM! (max available is {} vCPUs)".format(available_vcpus))

        try:
            pairs = VMware.parse_vmware_file(self._vmx_path)
            if vcpus > 1:
                pairs["numvcpus"] = str(vcpus)
                cores_per_sockets = int(vcpus / psutil.cpu_count(logical=False))
                if cores_per_sockets > 1:
                    pairs["cpuid.corespersocket"] = str(cores_per_sockets)
                pairs["memsize"] = str(ram)
                VMware.write_vmx_file(self._vmx_path, pairs)
            log.info("GNS3 VM vCPU count set to {} and RAM amount set to {}".format(vcpus, ram))
        except OSError as e:
github MolSSI / QCEngine / qcengine / config.py View on Github external
#  The QC code runs on a different node than the node running this Python function, which may have different info

    global _global_values
    if _global_values is None:
        _global_values = {}
        _global_values["hostname"] = socket.gethostname()
        _global_values["memory"] = round(psutil.virtual_memory().available / (1024 ** 3), 3)
        _global_values["username"] = getpass.getuser()

        # Work through VMs and logical cores.
        if hasattr(psutil.Process(), "cpu_affinity"):
            cpu_cnt = len(psutil.Process().cpu_affinity())
        else:
            cpu_cnt = psutil.cpu_count(logical=False)
            if cpu_cnt is None:
                cpu_cnt = psutil.cpu_count(logical=True)

        _global_values["ncores"] = cpu_cnt
        _global_values["nnodes"] = 1

        _global_values["cpuinfo"] = cpuinfo.get_cpu_info()
        _global_values["cpu_brand"] = _global_values["cpuinfo"]["brand"]

    if key is None:
        return _global_values.copy()
    else:
        return _global_values[key]
github ronpandolfi / Xi-cam / xicam / plugins / tomography / __init__.py View on Github external
This function is either self.run3DPreview or self.runSlicePreview
        finished : function/QtCore.Slot, optional
            Slot to receive the background threads finished signal
        dims : typle, optional
            Tuple containing dimensions of dataset to be reconstructed
        fixed_func : type class
            A dynamic class with only the necessary attributes to be run in a workflow pipeline. This is used for
            parameter range tests to create the class with the parameter to be run and send it to a background thread.
            See FunctionManager.testParameterRange for more details
        prange: dict, optional
            Dictionary containing parameter being tested for TestParamRange, and the function the parameter belongs to
        """
        bg_functionstack = threads.method(callback_slot=callback, finished_slot=finished,
                                          lock=threads.mutex)(self.manager.loadPreviewData)
        bg_functionstack(self.centerwidget.widget(self.currentIndex()), dims=dims,
                         ncore=cpu_count(), fixed_func=fixed_func, prange=prange)
github Stumblinbear / Prism / prism / plugins- / system / prism_system / views.py View on Github external
def get_cpu_count():
	"""Return a ``(physical, extra_logical)`` tuple of CPU counts.

	``physical`` is the number of physical cores; ``extra_logical`` is how
	many additional logical CPUs exist on top of them (e.g. from
	hyper-threading).
	"""
	physical = psutil.cpu_count(logical=False)
	logical = psutil.cpu_count(logical=True)
	if physical is None:
		# psutil.cpu_count(logical=False) returns None on platforms
		# where the physical core count cannot be determined; fall
		# back to the logical count so the subtraction below does not
		# raise TypeError.
		physical = logical
	return (physical, logical - physical)
github ninadmhatre / zual / dashboard_mods / ServerAddon.py View on Github external
def _get_cpu_stats(self):
        """Collect basic CPU statistics: core count, CPU times and
        per-CPU utilization percentages."""
        return {
            'processors': ps.cpu_count(),
            'times': ps.cpu_times(),
            'load': ps.cpu_percent(percpu=True),
        }