How to use the igraph.load function in igraph

To help you get started, we've selected a few igraph.load examples based on popular ways it is used in public projects.
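
Before the project snippets, here is the call at its simplest. In python-igraph, igraph.load is a thin convenience wrapper around Graph.Read: it guesses the file format from the file extension unless you name one with the format argument. A minimal sketch, using a hypothetical file name:

import igraph

# Format is inferred from the .gml extension (hypothetical file)
g = igraph.load("karate.gml")

# The same call with the format named explicitly
g = igraph.load("karate.gml", format="gml")

print(g.summary())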

Lab41/Circulo: circulo/unit_tests/test_metrics.py (view on GitHub)
def setUp(self):
        self.G=igraph.load("karate.gml")
        
        membership=[
                    [0,1,2,3,7,11,12,13,17,19,21],
                    [4,5,6,10,16],
                    [8,9,14,15,18,20,22,23,24,25,26,27,28,29,30,31,32,33]]
        cover=igraph.VertexCover(self.G, membership)
        metrics=VertexCoverMetric.run_analysis(cover, weights=None)
        metrics.report()
        self.comm_metrics=metrics.comm_metrics
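
Since karate.gml is a GML file, igraph.load infers the format from the extension alone; the loaded Graph is then wrapped in an igraph.VertexCover built from explicit per-community lists of vertex IDs.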
Lab41/Circulo: experiments/gephi_plot/create_graphml.py (view on GitHub)
def analyze_json(worker):
    """
    Take in a set of json community detection results files and a graphml file representing the raw graph and output a
    graphml file that contains, as attributes, the results of the algorithms

    Args:
    worker: Named tuple of json_path raw_graph_path output_path timeout
    """
    signal.signal(signal.SIGALRM, __handle_timeout)
    signal.setitimer(signal.ITIMER_REAL, worker.timeout)

    print('Loading raw GraphML truth file: %s' % worker.raw_graph_path)
    if worker.raw_graph_path is not None:
        G = igraph.load(worker.raw_graph_path)
    else:
        print("ERROR: Not able to load graph")
        return

    try:
        for json_path in worker.json_path:
            with open(json_path) as f:
                data = json.load(f)
                (name, algorithm) = data['job_name'].split('--')[:2]

                algo_name = 'algo_%s'%algorithm

                # Only if we are pulling least frequent
                if worker.pick_least_frequent or worker.pick_most_frequent:
                    # Calculate number of nodes in each community
                    community_counts = {}
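
The load itself is guarded: the path is checked against None before igraph.load is called, and the SIGALRM timer armed with signal.setitimer bounds how long the whole analysis, load included, is allowed to run.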
Lab41/Circulo: data/football/do_example.py (view on GitHub)
def example():
    """
    Performs the example outlined in the README. Draws the graph of one dataset.
    """
    G = ig.load("data/football.gml")
    r = conga(G)

    #optimal count
    optimal_cover = r[r.optimal_count]

    for comm in optimal_cover:
        for n in comm:
            print(G.vs[n]['label'])
        print("\n")
vad/wiki-network: graph_enrich.py (view on GitHub)
    opts, files = p.parse_args()

    if not files:
        p.error("Give me a file, please ;-)")
    fn = files[0]

    lang, date, type_ = explode_dump_filename(fn)

    groups = ('bot', 'sysop', 'bureaucrat', 'checkuser', 'steward', 'import',
              'transwiki', 'uploader', 'ipblock-exempt', 'oversight',
              'founder', 'rollbacker', 'accountcreator', 'autoreviewer',
              'abusefilter')
    g = ig.load(fn)
    if opts.source:
        sourceg = ig.load(opts.source)
        for destv in g.vs:
            try:
                sourcev = sourceg.vs.select(username=destv['username'])[0]
            except IndexError:
                print(destv['username'], 'not found in source')
                for group in groups:
                    destv[group] = None
                continue
            for group in groups:
                destv[group] = sourcev[group]

    else:
        for group in groups:
            addGroupAttribute(g, lang, group)

    print('BLOCKED ACCOUNTS')
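
Two graphs are loaded here so that per-user group attributes can be copied between them: sourceg.vs.select(username=destv['username']) returns the vertices whose username attribute matches, and the IndexError branch fills every group attribute with None for users missing from the source graph.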
cb-cities/sf_abm: sf_abm_mp2.py (view on GitHub)
def map_edge_pop(pid):
    #logger.info('process ID is {}'.format(os.getpid()))
    print('process ID is {}, pid is {}'.format(os.getpid(), pid))
    #logger = logging.getLogger('main.one_step.map_edge_pop')
    #t0_process = time.time()
    
    ### Read initial graph
    g = igraph.load('data_repo/Imputed_data_False9_0509.graphmlz')
    #logger.debug('graph summary {}'.format(g.summary()))
    g.es['weights'] = g.es['sec_length']
    #logger.info('graph weights attribute created')
    
    day=1
    hour=3
    ### Read/Generate OD matrix for this time step
    #OD_matrix = random_OD(g)
    OD = scipy.sparse.load_npz('TNC/OD_matrices/DY{}_HR{}_OD.npz'.format(day, hour))
    #logger.debug('finish reading sparse OD matrix, shape is {}'.format(OD_matrix.shape))
    OD = OD.tolil()
    #logger.info('finish converting the matrix to lil')
    ### Load the dictionary used to find the osm_node_id from matrix row/col id
    OD_nodesID_dict = json.load(open('TNC/OD_matrices/DY{}_HR{}_node_dict.json'.format(day, hour)))
    #logger.info('finish loading nodesID_dict')
    g_vs_node_osmid = g.vs['node_osmid']
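
This function appears to run as a multiprocessing worker (it prints os.getpid() and takes a pid argument), so each process loads the .graphmlz file itself rather than sharing one graph object across processes; .graphmlz is gzip-compressed GraphML, which igraph.load recognizes by extension.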
neurodata/m2g: MR-OCP/mrcap/fibergraph.py (view on GitHub)
def loadFromIgraph(self, filename, gformat="graphml"):
    """
    Load a sparse matrix from igraph as a numpy pickle

    Positional arguments:
    ====================
    filename - the file name/path to where you want to save the graph
    gformat - the format which you want to use to save the graph. Choices:
    """
    self.graph = igraph.load(filename, format=gformat)
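
Unlike the extension-based examples above, this wrapper passes format through explicitly, which is the safer choice when the extension does not reveal the contents. A minimal sketch under the same assumption, with a hypothetical path:

import igraph

# Hypothetical file whose .dat extension says nothing about the format
g = igraph.load("exports/fibergraph.dat", format="graphml")
print(g.summary())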
cb-cities/sf_abm: London_SSSP/London_abm_mp_sssp_full.py (view on GitHub)
def main():
    logging.basicConfig(filename='London_abm_mp_sssp.log', level=logging.WARNING)
    logger = logging.getLogger('main')

    t_start = time.time()

    ### Read initial graph
    global g
    g = igraph.load('data_repo/London_Directed/London_0621.graphmlz') ### This file contains the weekday 9am link-level travel time for SF, imputed from a month's worth of Google Directions API data
    logger.debug('graph summary {}'.format(g.summary()))
    g.es['weight'] = g.es['length']
    logger.debug('graph weights attribute created')

    t0 = time.time()
    one_step()
    t1 = time.time()
    logger.debug('running time for one time step is {}'.format(t1-t0))
    ### Update graph
    #edge_weights = np.array(g.es['weights'])
    #edge_weights[list(edge_volume.keys())] = np.array(list(edge_volume.values()))
    #g.es['weights'] = edge_weights.tolist()
    t_end = time.time()
    print(t_end-t_start)
    logger.info('total run time is {} seconds'.format(t_end-t_start))
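
As in the multiprocessing example above, the .graphmlz extension is enough for igraph.load to decompress and parse the gzip-compressed GraphML; the graph is declared global before loading so that one_step() can use it without receiving it as an argument.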
Lab41/Circulo: experiments/community_label.py (view on GitHub)
def label_communities(input_file, results_filename, attributes_to_ignore, count_type="both"):
    # TODO: Consider refactoring to split edge vs node code more completely and only merge for display
    G = igraph.load(input_file)

    # Build up node_ids_to_community
    node_ids_to_communities = {}
    results = json.load(open(results_filename))

    community_size = {} # Total size of each community
    for i, communities in enumerate(results['membership']):
        node_ids_to_communities[i] = communities
        for community in communities:
            if community not in community_size:
                community_size[community] = 1
            else:
                community_size[community] += 1


    # iterate through vertices, collecting the labels we are interested in
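
The results JSON is index-aligned with the graph's vertices: results['membership'][i] lists the community IDs that vertex i belongs to, which is what populates node_ids_to_communities and the per-community size counts.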
cb-cities/sf_abm: SF_ODSP/sf_abm.py (view on GitHub)
def main():
    ### Read initial graph
    g = igraph.load('data_repo/Imputed_data_False9_0509.graphmlz')
    logger.debug('graph summary {}'.format(g.summary()))
    g.es['weights'] = g.es['sec_length']
    logger.info('graph weights attribute created')

    t0 = time.time()
    edge_volume = one_step(g, 1, 3)
    t1 = time.time()
    logger.debug('running time for one time step is {}'.format(t1-t0))
    ### Update graph
    edge_weights = np.array(g.es['weights'])
    edge_weights[list(edge_volume.keys())] = np.array(list(edge_volume.values()))
    g.es['weights'] = edge_weights.tolist()
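
One detail worth copying from this snippet: g.es['weights'] is an ordinary edge-attribute list, so it can be pulled into a NumPy array, updated in bulk at the edge indices returned by one_step(), and written back with a single assignment.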