How to use the multiprocessing.Array function in multiprocessing

To help you get started, we’ve selected a few multiprocessing.Array examples, based on popular ways the function is used in public projects.

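At its core, multiprocessing.Array(typecode_or_type, size_or_initializer, *, lock=True) allocates a ctypes array in shared memory that child processes can read and write; by default it is wrapped with a lock for synchronized access. Here is a minimal, self-contained sketch (not taken from any of the projects below):

import multiprocessing

def increment(shared, index):
    # The default wrapper carries an RLock; get_lock() returns it.
    with shared.get_lock():
        shared[index] += 1

if __name__ == "__main__":
    # 'i' is the typecode for a C int; the array is zero-initialized.
    counters = multiprocessing.Array('i', 4)
    procs = [multiprocessing.Process(target=increment, args=(counters, i % 4))
             for i in range(8)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(list(counters))  # each slot was incremented twice -> [2, 2, 2, 2]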

github cbassa / stvid / acquire.py
        sys.exit()
    else:
        tend = tnow + test_duration*u.s

    logger.info("Starting data acquisition")
    logger.info("Acquisition will end after "+tend.isot)

    # Get settings
    nx = cfg.getint(camera_type, 'nx')
    ny = cfg.getint(camera_type, 'ny')
    nz = cfg.getint(camera_type, 'nframes')

    # Initialize arrays
    z1base = multiprocessing.Array(ctypes.c_uint8, nx*ny*nz)
    z1 = np.ctypeslib.as_array(z1base.get_obj()).reshape(nz, ny, nx)
    t1base = multiprocessing.Array(ctypes.c_double, nz)
    t1 = np.ctypeslib.as_array(t1base.get_obj())
    z2base = multiprocessing.Array(ctypes.c_uint8, nx*ny*nz)
    z2 = np.ctypeslib.as_array(z2base.get_obj()).reshape(nz, ny, nx)
    t2base = multiprocessing.Array(ctypes.c_double, nz)
    t2 = np.ctypeslib.as_array(t2base.get_obj())

    image_queue = multiprocessing.Queue()

    # Set processes
    pcompress = multiprocessing.Process(target=compress,
                                        args=(image_queue, z1, t1, z2, t2, nx, ny,
                                              nz, tend.unix, path, device_id, cfg))
    if camera_type == "CV2":
        pcapture = multiprocessing.Process(target=capture_cv2,
                                           args=(image_queue, z1, t1, z2, t2,
                                                 nx, ny, nz, tend.unix, device_id, live))
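
The snippet above allocates flat shared buffers (z1base, t1base, ...) and views them as NumPy arrays with np.ctypeslib.as_array, so the capture and compress processes operate on the same frames without copying. A reduced sketch of that pattern (the dimensions and the fill function are placeholders, not stvid's API):

import ctypes
import multiprocessing
import numpy as np

def fill(shared_base, nz, ny, nx):
    # Re-wrap the shared buffer in the child; writes are visible to the parent.
    frames = np.ctypeslib.as_array(shared_base.get_obj()).reshape(nz, ny, nx)
    frames[:] = 1

if __name__ == "__main__":
    nz, ny, nx = 2, 4, 4
    base = multiprocessing.Array(ctypes.c_uint8, nx * ny * nz)
    p = multiprocessing.Process(target=fill, args=(base, nz, ny, nx))
    p.start()
    p.join()
    frames = np.ctypeslib.as_array(base.get_obj()).reshape(nz, ny, nx)
    print(int(frames.sum()))  # 32: the child set every element to 1
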
github peeesspee / BitsOJ / Client_Windows / client.py
def main():
	#################################
	# Initialize the database and return the cursor
	print("[ SETUP ] INITIALISING DATABASE ............")
	try:
		conn, cursor = manage_database.initialize_table()
		manage_local_ids.initialize_local_id(cursor)
	except Exception as error:
		ex_type, ex_obj, ex_tb = sys.exc_info()
		f_name = os.path.split(ex_tb.tb_frame.f_code.co_filename)[1]
		print(ex_type, f_name, ex_tb.tb_lineno)

	##################################
	# Create variables/lists that will be shared between processes
	data_changed_flags = multiprocessing.Array('i', 10)
	queue = multiprocessing.Queue()
	scoreboard = multiprocessing.Queue()
	for i in range(10):
		data_changed_flags[i] = 0
	# index    value         meaning
	# 0        0/1/2/3/4     Contest Not Started/Contest Started/Running/Contest Stopped/Time Up
	# 1        0/1/2         Initialize/Verdict Not Received/Verdict Received
	# 2        0/1/2         Initialize/Query Response Not Received/Query Response Received
	# 3        1             Server Not Accepting Submissions
	# 4        0/1/3         Timer Stopped/Timer Running/Update Timer
	# 5        0/1/2         Proper Connection/Single Client Disconnected/All Clients Disconnected
	# 6        1             Leader Board Update
	# 7        1             Problem Edited
	# 8        1             Blocked User
	# 9        1             Run ID Received
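
Here multiprocessing.Array('i', 10) serves as a table of integer status flags shared between the client's processes, with the meaning of each index documented in the comment block. A minimal sketch of the same idea (the flag name is hypothetical):

import multiprocessing
import time

CONTEST_STARTED = 0  # hypothetical flag index

def worker(flags):
    # Poll a shared flag until another process raises it.
    while flags[CONTEST_STARTED] == 0:
        time.sleep(0.01)
    print("contest started")

if __name__ == "__main__":
    flags = multiprocessing.Array('i', 10)  # ten C ints, zero-initialized
    p = multiprocessing.Process(target=worker, args=(flags,))
    p.start()
    flags[CONTEST_STARTED] = 1  # a single writer needs no extra locking here
    p.join()
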
github davidhallac / TVGL / PaperCode / snapvxScalability.py
            info_i = node_info[etup[0]]
            info_j = node_info[etup[1]]
            ind_zij = length
            ind_uij = length
            length += info_i[X_LEN]
            ind_zji = length
            ind_uji = length
            length += info_j[X_LEN]
            # Information package for this edge
            tup = (etup, obj, con,\
                info_i[X_VARS], info_i[X_LEN], info_i[X_IND], ind_zij, ind_uij,\
                info_j[X_VARS], info_j[X_LEN], info_j[X_IND], ind_zji, ind_uji)
            edge_list.append(tup)
            edge_info[etup] = tup
        edge_z_vals = multiprocessing.Array('d', [0.0] * length)
        edge_u_vals = multiprocessing.Array('d', [0.0] * length)
        z_length = length

        # Populate sparse matrix A.
        # A has dimensions (p, n), where p is the length of the stacked vector
        # of node variables, and n is the length of the stacked z vector of
        # edge variables.
        # Each row of A has one 1. There is a 1 at (i,j) if z_i = x_j.
        A = lil_matrix((z_length, x_length), dtype=numpy.int8)
        for ei in self.Edges():
            etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
            info_edge = edge_info[etup]
            info_i = node_info[etup[0]]
            info_j = node_info[etup[1]]
            for offset in xrange(info_i[X_LEN]):
                row = info_edge[Z_ZIJIND] + offset
                col = info_i[X_IND] + offset
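
Passing a sequence instead of a size, as in multiprocessing.Array('d', [0.0] * length), sets both the length and the initial contents of the shared double vector; the workers then update disjoint slices addressed by the precomputed offsets. A stripped-down sketch of that slicing scheme (the offsets and sizes here are invented):

import multiprocessing

def scale(vals, start, end, factor):
    # Each worker updates a disjoint slice, so element-level locking suffices.
    for i in range(start, end):
        vals[i] *= factor

if __name__ == "__main__":
    length = 8
    # A sequence initializer fixes both the size and the starting values.
    edge_z_vals = multiprocessing.Array('d', [1.0] * length)
    jobs = [multiprocessing.Process(target=scale,
                                    args=(edge_z_vals, k * 4, (k + 1) * 4, 2.0))
            for k in range(2)]
    for p in jobs:
        p.start()
    for p in jobs:
        p.join()
    print(list(edge_z_vals))  # [2.0, 2.0, ..., 2.0]
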
github segfault16 / modular-led-controller-workstation / audioled / project.py
                array = mp.Array(ctypes.c_uint8, 3 * device.getNumPixels(), lock=lock)
                virtualDevice = audioled.devices.VirtualOutput(device=realDevice,
                                                               num_pixels=realDevice.getNumPixels(),
                                                               shared_array=array,
                                                               shared_lock=lock,
                                                               num_rows=realDevice.getNumRows(),
                                                               start_index=0)

                oldPanelWrapper.setDevice(virtualDevice)
                fgDevice = oldPanelWrapper

        else:
            # New virtual output
            outputDevice = device
            lock = mp.Lock()
            array = mp.Array(ctypes.c_uint8, 3 * device.getNumPixels(), lock=lock)
            virtualDevice = audioled.devices.VirtualOutput(device=device,
                                                           num_pixels=device.getNumPixels(),
                                                           shared_array=array,
                                                           shared_lock=lock,
                                                           num_rows=device.getNumRows(),
                                                           start_index=0)
            fgDevice = virtualDevice
            realDevice = device

        # Start filtergraph process
        successful = False
        sleepfact = 1.
        while not successful:
            q = self._publishQueue.register()
            p = mp.Process(target=worker, args=(q, filterGraph, fgDevice, dIdx, slotId))
            p.start()
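
Note the lock=lock argument: instead of letting Array create its own lock, the project passes one mp.Lock() so the shared pixel buffer and the VirtualOutput wrapping it synchronize on the same lock. A minimal sketch of sharing an explicit lock this way (writer is a placeholder, not the project's worker):

import ctypes
import multiprocessing as mp

def writer(pixels, lock):
    # Hold the shared lock for a whole-frame update; use get_obj() for the
    # raw buffer, since a plain Lock is not reentrant.
    with lock:
        raw = pixels.get_obj()
        for i in range(len(raw)):
            raw[i] = 255

if __name__ == "__main__":
    num_pixels = 4
    lock = mp.Lock()
    # The explicit lock is shared by this array and any other consumer of it.
    pixels = mp.Array(ctypes.c_uint8, 3 * num_pixels, lock=lock)
    p = mp.Process(target=writer, args=(pixels, lock))
    p.start()
    p.join()
    print(list(pixels))  # twelve channel values, all 255
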
github acil-bwh / ChestImagingPlatform / cip_python / classification / classify_image_subtypes.py
        ct_labels_array = np.zeros(np.shape(in_ct))
        num_patches_to_process = np.shape(self.unique_patch_labels)[0]        
 
        """
        Pack the distance map into a short (int16) array, keeping two decimal places, to improve memory efficiency
        """
        in_distance=(100.0*in_distance).astype('int16')

        """
        Make a shareable copy of the volumes 
        """
        global ct
        num_array_items = np.shape(in_ct)[0]* np.shape(in_ct)[1]* \
            np.shape(in_ct)[2]
        shared_array = multiprocessing.Array(ctypes.c_short, num_array_items, \
            lock=False)
        ct = np.frombuffer(shared_array, dtype = ctypes.c_short)
        ct = ct.reshape(np.shape(in_ct))
        ct[:] = in_ct[:]

        global lm
        num_array_items = np.shape(in_lm)[0]* np.shape(in_lm)[1]* \
            np.shape(in_lm)[2]
        shared_array_lm = multiprocessing.Array(ctypes.c_ushort, \
            num_array_items, lock=False)
        lm = np.frombuffer(shared_array_lm, dtype = ctypes.c_ushort)
        lm = lm.reshape(np.shape(in_lm))
        lm[:] = in_lm[:]

        global distance_image
        num_array_items = np.shape(in_distance)[0]* np.shape(in_distance)[1]* \
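
This excerpt uses lock=False, which makes multiprocessing.Array return a raw ctypes array with no synchronization wrapper; np.frombuffer then wraps that buffer zero-copy, so ct[:] = in_ct[:] fills shared memory directly, and the arrays are bound to module-level globals that forked workers inherit. A condensed sketch of the lock-free wrapping step:

import ctypes
import multiprocessing
import numpy as np

if __name__ == "__main__":
    shape = (2, 3, 4)
    num_items = shape[0] * shape[1] * shape[2]
    # lock=False returns a raw shared ctypes array: no locking overhead,
    # appropriate when workers only read (or write disjoint regions).
    shared_array = multiprocessing.Array(ctypes.c_short, num_items, lock=False)
    ct = np.frombuffer(shared_array, dtype=ctypes.c_short).reshape(shape)
    ct[:] = np.arange(num_items, dtype=np.int16).reshape(shape)
    print(ct[1, 2, 3])  # 23
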
github WZBSocialScienceCenter / tmtoolkit / tmtoolkit / lda_utils / _evaluation_common.py
            arr_ctype = ctypes.c_int
        elif data.dtype == np.int32:
            arr_ctype = ctypes.c_int32
        elif data.dtype == np.int64:
            arr_ctype = ctypes.c_int64
        else:
            raise ValueError('dtype of `data` is not supported: `%s`' % data.dtype)

        if not hasattr(data, 'format'):  # dense matrix -> convert to sparse matrix in coo format
            data = coo_matrix(data)
        elif data.format != 'coo':
            data = data.tocoo()

        sparse_data_base = mp.Array(arr_ctype, data.data)
        sparse_rows_base = mp.Array(ctypes.c_int, data.row)  # TODO: datatype correct?
        sparse_cols_base = mp.Array(ctypes.c_int, data.col)  # TODO: datatype correct?

        logger.info('initializing evaluation with sparse matrix of format `%s` and shape %dx%d'
                    % (data.format, data.shape[0], data.shape[1]))

        return sparse_data_base, sparse_rows_base, sparse_cols_base
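
The excerpt above shows two details worth copying: the ctypes element type is chosen to match the numpy dtype, and the numpy arrays of a COO sparse matrix are passed as initializers so that data, row and column indices are copied into three shared Arrays. A condensed sketch:

import ctypes
import multiprocessing as mp
import numpy as np
from scipy.sparse import coo_matrix

if __name__ == "__main__":
    dense = np.array([[0, 2], [3, 0]], dtype=np.int64)
    data = coo_matrix(dense)
    # Match the ctypes element type to the numpy dtype before copying.
    arr_ctype = ctypes.c_int64 if data.data.dtype == np.int64 else ctypes.c_int32
    # A numpy array used as initializer is copied into shared memory.
    sparse_data_base = mp.Array(arr_ctype, data.data)
    sparse_rows_base = mp.Array(ctypes.c_int, data.row)
    sparse_cols_base = mp.Array(ctypes.c_int, data.col)
    print(list(sparse_data_base), list(sparse_rows_base), list(sparse_cols_base))
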
github GGiecold / Concurrent_AP / Concurrent_AP.py
s_reduced_array.get_lock())
                         + numpy_args)) as pool:
                pool.map_async(multiprocessing_cluster_labels_B,
                      chunk_generator(N, 3 * N_processes), chunk_size)
                               
            pool.close()
            pool.join()
            
            s_reduced = to_numpy_array(*numpy_args)
            
            j = np.argmax(s_reduced)
            I[k] = ii[j]
            
            gc.collect()
            
        c_array = multiprocessing.Array(c_int, N, lock = True)
        
        chunk_size = get_chunk_size(N, 3 * N_processes)
        numpy_args = c_array, N, np.int32
    
        with closing(multiprocessing.Pool(N_processes, 
                     initializer = cluster_labels_init, 
                     initargs = (hdf5_file, I, c_array.get_lock()) 
                               + numpy_args)) as pool:
            pool.map_async(multiprocessing_cluster_labels_C, 
                  chunk_generator(N, 3 * N_processes), chunk_size)
        
        pool.close()
        pool.join()
        
        c = to_numpy_array(*numpy_args)
        c[I] = np.arange(K)
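
Synchronized Arrays cannot be sent to pool workers as task arguments (they may only be shared through inheritance); they have to be handed over through the pool's initializer, which is why c_array and its get_lock() appear in initargs above. A minimal sketch of that initializer pattern (init_worker and fill_chunk are placeholder names):

import multiprocessing
from contextlib import closing

def init_worker(shared):
    # Shared ctypes objects reach pool workers via the initializer,
    # not via map() arguments; keep a module-level reference.
    global c_array
    c_array = shared

def fill_chunk(chunk):
    start, end = chunk
    with c_array.get_lock():  # the default lock is reentrant
        for i in range(start, end):
            c_array[i] = i

if __name__ == "__main__":
    N = 8
    c_array = multiprocessing.Array('i', N, lock=True)
    with closing(multiprocessing.Pool(2, initializer=init_worker,
                                      initargs=(c_array,))) as pool:
        pool.map(fill_chunk, [(0, 4), (4, 8)])
    pool.join()
    print(list(c_array))  # [0, 1, 2, ..., 7]
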
github baidu-security / openrasp-iast / openrasp_iast / core / components / communicator.py
    def __init__(self, data_struct):
        self._data_index = data_struct
        data_index = 0
        self._module_locks = {}
        for module in self._data_index:
            self._module_locks[module] = multiprocessing.Lock()
            for key in self._data_index[module]:
                self._data_index[module][key] = data_index
                data_index += 1
        self.shared_array = multiprocessing.Array('l', data_index)
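
Here the constructor flattens a nested {module: {key: ...}} structure into slot numbers and backs them all with a single multiprocessing.Array('l', data_index) of C longs, guarded by one Lock per module. A self-contained sketch of the same layout (the class and method names are invented, not the openrasp-iast API):

import multiprocessing

class SharedCounters:
    # A sketch: names map to slots in one shared Array of C longs ('l'),
    # with one Lock per module.
    def __init__(self, data_struct):
        data_index = 0
        self._index = {}
        self._module_locks = {}
        for module, keys in data_struct.items():
            self._module_locks[module] = multiprocessing.Lock()
            for key in keys:
                self._index[(module, key)] = data_index
                data_index += 1
        self.shared_array = multiprocessing.Array('l', data_index)

    def incr(self, module, key):
        with self._module_locks[module]:
            self.shared_array[self._index[(module, key)]] += 1

if __name__ == "__main__":
    counters = SharedCounters({"scanner": ["requests", "errors"]})
    counters.incr("scanner", "requests")
    print(counters.shared_array[0])  # 1
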
github davidhallac / TVGL / PaperCode / snapvxScalability.py
            obj = self.node_objectives[nid]
            variables = self.node_variables[nid]
            con = self.node_constraints[nid]
            neighbors = [ni.GetNbrNId(j) for j in xrange(deg)]
            # Node's constraints include those imposed by edges
            for neighborId in neighbors:
                etup = self.__GetEdgeTup(nid, neighborId)
                econ = self.edge_constraints[etup]
                con += econ
            # Calculate sum of dimensions of all Variables for this node
            size = sum([var.size[0] for (varID, varName, var, offset) in variables])
            # Nearly complete information package for this node
            node_info[nid] = (nid, obj, variables, con, length, size, deg,\
                neighbors)
            length += size
        node_vals = multiprocessing.Array('d', [0.0] * length)
        x_length = length

        # Organize information for each node in final edge_list structure and
        # also helper edge_info structure
        edge_list = []
        edge_info = {}
        # Keeps track of the current offset necessary into the shared edge
        # values Arrays
        length = 0
        for ei in self.Edges():
            etup = self.__GetEdgeTup(ei.GetSrcNId(), ei.GetDstNId())
            obj = self.edge_objectives[etup]
            con = self.edge_constraints[etup]
            con += self.node_constraints[etup[0]] +\
                self.node_constraints[etup[1]]
            # Get information for each endpoint node
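
As with the edge arrays earlier, all node variables are stacked into one shared double vector, node_vals, and each node is addressed by the offset and size recorded in its node_info tuple. A toy version of that stacked-vector layout (the sizes and offsets are invented):

import multiprocessing

def solve_node(vals, offset, size, value):
    # Each node writes only its own [offset, offset + size) slice
    # of the stacked shared vector.
    with vals.get_lock():
        for i in range(offset, offset + size):
            vals[i] = value

if __name__ == "__main__":
    # Two hypothetical nodes with variable sizes 2 and 3, stacked together.
    sizes = [2, 3]
    offsets = [0, 2]
    node_vals = multiprocessing.Array('d', [0.0] * sum(sizes))
    procs = [multiprocessing.Process(target=solve_node,
                                     args=(node_vals, offsets[k], sizes[k], float(k + 1)))
             for k in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(list(node_vals))  # [1.0, 1.0, 2.0, 2.0, 2.0]
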
github mxmssh / manul / manul.py
    INFO(0, None, None, "Done")

    # if radamsa weight is not zero, check that we can actually execute it
    if "radamsa:0" not in args.mutator_weights:
        if sys.platform == "win32":
            check_binary("radamsa.exe")
        else:
            check_binary("radamsa")


    files = allocate_files_per_jobs(args)

    virgin_bits = None
    crash_bits = None
    if not args.simple_mode:
        virgin_bits = multiprocessing.Array("i", SHM_SIZE)
        crash_bits = multiprocessing.Array("i", SHM_SIZE)
        for i in range(0, SHM_SIZE):
            virgin_bits[i] = 255  # initializing with all 0xFFs
            crash_bits[i] = 255

    # allocating data structures where we store all statistics about our fuzzers
    stats = FuzzerStats()
    all_threads_stats = list()
    all_threads_handles = list()

    for i, files_piece in enumerate(files):
        stats_array = multiprocessing.Array("d", stats.get_len())
        t = multiprocessing.Process(target=run_fuzzer_instance, args=(files_piece, i, virgin_bits, args, stats_array,
                                                                      args.restore, crash_bits, dbi_setup))
        t.start()
        all_threads_stats.append(stats_array)
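
The fuzzer keeps two kinds of shared state: global coverage bitmaps (virgin_bits, crash_bits) shared by all instances, and one 'd' stats Array per instance that the parent polls. A scaled-down sketch of that arrangement (run_fuzzer_instance is replaced by a stub, and SHM_SIZE is shrunk to a toy value):

import multiprocessing

SHM_SIZE = 8  # toy size; the real fuzzer uses a much larger bitmap

def fuzzer_instance(worker_id, virgin_bits, stats_array):
    # Clear one coverage bit and report a fake execs/sec figure.
    virgin_bits[worker_id] = 0
    stats_array[0] = 123.0

if __name__ == "__main__":
    virgin_bits = multiprocessing.Array('i', SHM_SIZE)
    for i in range(SHM_SIZE):
        virgin_bits[i] = 255  # 0xFF = "no edge seen yet", as in the snippet above
    all_threads_stats = []
    all_threads_handles = []
    for i in range(2):
        stats_array = multiprocessing.Array('d', 4)  # one stats block per worker
        t = multiprocessing.Process(target=fuzzer_instance,
                                    args=(i, virgin_bits, stats_array))
        t.start()
        all_threads_stats.append(stats_array)
        all_threads_handles.append(t)
    for t in all_threads_handles:
        t.join()
    print(list(virgin_bits), [list(s) for s in all_threads_stats])
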