How to use the deap.tools.HallOfFame class in deap

To help you get started, we’ve selected a few deap examples based on popular ways deap.tools.HallOfFame is used in public projects.

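Before turning to the project snippets below, here is a minimal, self-contained sketch of the usual pattern: create a tools.HallOfFame with a maximum size, hand it to one of the built-in algorithms through the halloffame argument, and read the best individual back out afterwards. The OneMax-style problem, the parameter values, and every name outside the deap API are illustrative assumptions, not taken from the projects below.

import random

import numpy
from deap import algorithms, base, creator, tools

# Illustrative OneMax setup: maximize the number of ones in a 50-bit string.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_bool, 50)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", lambda ind: (sum(ind),))
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)

pop = toolbox.population(n=100)
hof = tools.HallOfFame(1)  # keep only the single best individual ever seen

stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("max", numpy.max)

pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=20,
                               stats=stats, halloffame=hof, verbose=False)

print(hof[0], hof[0].fitness.values)  # best individual and its fitness

A HallOfFame keeps its entries sorted best-first and retains the best individuals ever encountered, even if they are later lost from the population, so hof[0] is always the overall best.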

GitHub: EpistasisLab/pennai/learn/deapGP/main.py
toolbox.register("evaluate", evalSymbReg, points=[
                     x / 10. for x in range(-10, 10)])
    toolbox.register("select", tools.selTournament,
                     tournsize=tournsize)  # tournsize arguments
    toolbox.register("mate", gp.cxOnePoint)
    toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)

    toolbox.decorate("mate", gp.staticLimit(
        key=operator.attrgetter("height"), max_value=17))
    toolbox.decorate("mutate", gp.staticLimit(
        key=operator.attrgetter("height"), max_value=17))

    pop = toolbox.population(n=population_size)  # population_size arguments
    hof = tools.HallOfFame(1)

    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", numpy.mean)
    mstats.register("std", numpy.std)
    mstats.register("min", numpy.min)
    mstats.register("max", numpy.max)

    pop, log = algorithms.eaSimple(pop, toolbox, cxpb=crossover_rate,
                                   mutpb=mutation_rate, ngen=generations, stats=mstats, halloffame=hof, verbose=False)  # crossover_rate, mutation_rate, generations
    stats_table = []
    statslist = ["avg", "max", "min", "std"]
    statsterm = ["fitness", "size"]
    # make header
    stats_table_header = []
GitHub: DEAP/deap/examples/coev/hillis.py
def main():
    random.seed(64)
    
    hosts = htoolbox.population(n=300)
    parasites = ptoolbox.population(n=300)
    hof = tools.HallOfFame(1)
    
    hstats = tools.Statistics(lambda ind: ind.fitness.values)
    hstats.register("avg", numpy.mean)
    hstats.register("std", numpy.std)
    hstats.register("min", numpy.min)
    hstats.register("max", numpy.max)
    
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    
    MAXGEN = 50
    H_CXPB, H_MUTPB = 0.5, 0.3
    P_CXPB, P_MUTPB = 0.5, 0.3
    
    fits = htoolbox.map(htoolbox.evaluate, hosts, parasites)
    for host, parasite, fit in zip(hosts, parasites, fits):
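This coevolution example is cut off just as its hand-written evaluation loop begins. Because no canned algorithm is called, nothing refreshes the hall of fame automatically; it has to be updated explicitly with hof.update() after each evaluation step. A rough sketch of that pattern, assuming a toolbox and an already-evaluated pop set up as in the other snippets on this page (NGEN and the probabilities are placeholder values):

from deap import algorithms, tools

NGEN = 50  # placeholder

hof = tools.HallOfFame(1)
hof.update(pop)  # seed with the evaluated initial population

for gen in range(1, NGEN + 1):
    offspring = algorithms.varAnd(pop, toolbox, cxpb=0.5, mutpb=0.2)
    fits = toolbox.map(toolbox.evaluate, offspring)
    for ind, fit in zip(offspring, fits):
        ind.fitness.values = fit
    hof.update(offspring)  # record the best individual ever seen
    pop = toolbox.select(offspring, k=len(pop))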
GitHub: ronpandolfi/Xi-cam/xicam/plugins/cdsaxs/fitting.py
logbook: list of dicts, length ngen, contains stats for each generation
    """
    toolbox = deap_base.Toolbox()

    residual=pickleable_residual(data,qx,qz,initial_guess,fit_mode='cmaes')

    toolbox.register('evaluate', residual)
    parallel = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(parallel)
    toolbox.register('map', pool.map)
    # last_time = time.perf_counter()
    process = psutil.Process()
    print('{} CPUs in node'.format(multiprocessing.cpu_count()))
    print('pid:{}'.format(os.getpid()))
    print(psutil.virtual_memory())
    halloffame = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register('avg', lambda x: np.mean(np.asarray(x)[np.isfinite(x)]) if np.asarray(x)[np.isfinite(
        x)].size != 0 else None)
    stats.register('std', lambda x: np.std(np.asarray(x)[np.isfinite(x)]) if np.asarray(x)[np.isfinite(
        x)].size != 0 else None)
    stats.register('min', lambda x: np.min(np.asarray(x)[np.isfinite(x)]) if np.asarray(x)[np.isfinite(
        x)].size != 0 else None)
    stats.register('max', lambda x: np.max(np.asarray(x)[np.isfinite(x)]) if np.asarray(x)[np.isfinite(
        x)].size != 0 else None)
    stats.register('fin', lambda x: np.sum(np.isfinite(x)) / np.size(x))
    # stats.register('cumtime', lambda x: time.perf_counter() - last_time)
    stats.register('rss_MB', lambda x: process.memory_info().rss / 1048576)
    stats.register('vms_MB', lambda x: process.memory_info().vms / 1048576)
    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
    population_list = []
GitHub: DEAP/deap/examples/ga/onemax_numpy.py
def main():
    random.seed(64)
    
    pop = toolbox.population(n=300)
    
    # The numpy equality function (operator.eq) between two arrays returns
    # element-wise equality, which raises an exception in the if similar()
    # check of the hall of fame. Using a different equality function such as
    # numpy.array_equal or numpy.allclose solves this issue.
    hof = tools.HallOfFame(1, similar=numpy.array_equal)
    
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    
    algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=40, stats=stats,
                        halloffame=hof)

    return pop, stats, hof
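The similar argument is the important detail here: HallOfFame filters out duplicates by calling similar(a, b) on candidate entries, and its default, operator.eq, breaks on numpy-array individuals because element-wise comparison returns an array rather than a single bool. Any two-argument predicate can be substituted; as a hedged variant of the line above, a tolerance-based comparison for float-valued individuals could look like this:

import numpy
from deap import tools

# Treat two float-vector individuals as duplicates when they are numerically
# close, so near-identical solutions do not crowd out distinct ones.
hof = tools.HallOfFame(maxsize=5, similar=numpy.allclose)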
GitHub: ShuhuaGao/geppy/examples/sr/sr_boolean.py
def main():
    # random.seed(2)  # for reproduction purpose
    n_pop = 50
    n_gen = 50

    pop = toolbox.population(n=n_pop)
    hof = tools.HallOfFame(3)

    stats = tools.Statistics(key=lambda ind: ind.fitness.values[0])
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    # start evolution
    pop, log = gep.gepSimple(pop, toolbox, mutpb=0.9, invpb=0.1, ispb=0.1, rispb=0.1, gpb=0.1,
                             cx1pb=0.4, cx2pb=0.2, cxgpb=0.1,
                             n_gen=n_gen, n_elites=2,
                             stats=stats, halloffame=hof)
    return pop, log, hof
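With tools.HallOfFame(3) the hall of fame keeps the three best individuals seen over the whole run, sorted best-first, and supports len(), indexing, and iteration. A brief illustrative fragment for inspecting the result, assuming the pop, log, hof returned by main() above:

pop, log, hof = main()
for rank, ind in enumerate(hof):  # iterates from best to worst
    print(rank, ind.fitness.values[0], ind)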
GitHub: insilichem/gaudi/old/assess.py
def main():
	pop = toolbox.population(n=args.pop)
	hof = deap.tools.HallOfFame(1)
	stats = deap.tools.Statistics(lambda ind: ind.fitness.values[0])
	stats.register("avg", numpy.mean)
	stats.register("min", numpy.min)
	stats.register("max", numpy.max)
	pop, log = deap.algorithms.eaMuPlusLambda(pop, toolbox, 
		mu = int(args.pop/2), lambda_= int(args.pop/2), cxpb=0.5, 
		mutpb=0.2, ngen=args.ngen, stats=stats, halloffame=hof)
	return pop, log, hof
GitHub: DEAP/deap/examples/es/cma_minfct.py
def main():
    # The cma module uses the numpy random number generator
    numpy.random.seed(128)

    # The CMA-ES algorithm takes a population of one individual as an argument.
    # The centroid is set to a vector of 5.0; see http://www.lri.fr/~hansen/cmaes_inmatlab.html
    # for more details about the Rastrigin function and other test problems for CMA-ES.
    strategy = cma.Strategy(centroid=[5.0]*N, sigma=5.0, lambda_=20*N)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    #logger = tools.EvolutionLogger(stats.functions.keys())
   
    # The CMA-ES algorithm converges with good probability with these settings
    algorithms.eaGenerateUpdate(toolbox, ngen=250, stats=stats, halloffame=hof)
    
    # print "Best individual is %s, %s" % (hof[0], hof[0].fitness.values)
    return hof[0].fitness.values[0]
GitHub: DEAP/deap/examples/gp/symbreg.py
def main():
    random.seed(318)

    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)
    
    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", numpy.mean)
    mstats.register("std", numpy.std)
    mstats.register("min", numpy.min)
    mstats.register("max", numpy.max)

    pop, log = algorithms.eaSimple(pop, toolbox, 0.5, 0.1, 40, stats=mstats,
                                   halloffame=hof, verbose=True)
    # print log
    return pop, log, hof
GitHub: DEAP/deap/examples/gp/spambase.py
def main():
    random.seed(10)
    pop = toolbox.population(n=100)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    
    algorithms.eaSimple(pop, toolbox, 0.5, 0.2, 40, stats, halloffame=hof)

    return pop, stats, hof
GitHub: RDFLib/graph-pattern-learner/gp_learner.py
def train(toolbox, population, run):
    hall_of_fame = deap.tools.HallOfFame(config.HOFSIZE)
    # pop = toolbox.population(n=50)
    pop = population
    g = 0
    logger.info(
        'Run %d, Generation %d: %d individuals',
        run, g, len(pop)
    )
    logger.debug('Population: %r', pop)

    # Evaluate the entire population
    _evaluate = partial(toolbox.evaluate, run=run, gen=g)
    eval_results = list(parallel_map(_evaluate, pop))
    logger.info(
        'Run %d, Generation %d: evaluated %d individuals',
        run, g, len(pop)
    )