How to use the deap.tools.Logbook class in deap

To help you get started, we’ve selected a few deap examples that show how deap.tools.Logbook is commonly used in public projects.

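Logbook is a chronological list of dictionaries: each call to record() appends one entry, and the stream property returns only the rows added since it was last read, which makes it convenient for incremental console output during a run. Here is a minimal, self-contained sketch of the basic pattern (the fitness values are placeholders, not taken from any of the projects below):

import numpy
from deap import tools

# Statistics applies its registered functions to whatever its key extracts;
# plain floats stand in here for individual fitness values.
stats = tools.Statistics()
stats.register("avg", numpy.mean)
stats.register("min", numpy.min)
stats.register("max", numpy.max)

logbook = tools.Logbook()
logbook.header = "gen", "evals", "avg", "min", "max"

fake_fitnesses = [3.0, 1.5, 2.25, 4.0]        # placeholder data
record = stats.compile(fake_fitnesses)
logbook.record(gen=0, evals=len(fake_fitnesses), **record)
print(logbook.stream)                          # prints the header once, then the new row

print(logbook.select("min"))                   # any logged column can be pulled back out as a list

Every example below follows this same record-then-stream pattern inside an evolutionary loop.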

github DEAP / deap / examples / es / cma_plotting.py View on Github
    # The CMA-ES algorithm takes a population of one individual as argument
    # The centroid is set to a vector of 5.0 see http://www.lri.fr/~hansen/cmaes_inmatlab.html
    # for more details about the rastrigin and other tests for CMA-ES    
    strategy = cma.Strategy(centroid=[5.0]*N, sigma=5.0, lambda_=20*N)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    halloffame = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    
    # Objects that will compile the data
    sigma = numpy.ndarray((NGEN,1))
    axis_ratio = numpy.ndarray((NGEN,1))
    diagD = numpy.ndarray((NGEN,N))
    fbest = numpy.ndarray((NGEN,1))
    best = numpy.ndarray((NGEN,N))
    std = numpy.ndarray((NGEN,N))

    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()
        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
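The excerpt above is cut off inside the evaluation loop. A hedged sketch, not copied verbatim from cma_plotting.py, of how the rest of a CMA-ES generation is typically wired to the Logbook, reusing the toolbox, stats, halloffame and logbook objects defined above:

    for gen in range(NGEN):
        # Generate and evaluate a new population (as in the excerpt above)
        population = toolbox.generate()
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        # Feed the evaluated population back into the CMA-ES strategy
        toolbox.update(population)

        # Track the best individual and log one row per generation
        halloffame.update(population)
        record = stats.compile(population)
        logbook.record(gen=gen, evals=len(population), **record)
        print(logbook.stream)

The plotting arrays declared earlier (sigma, axis_ratio, diagD, fbest, best, std) are presumably filled from the strategy and hall of fame each generation; that bookkeeping is left out of the sketch.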
github nilinswap / neuro-evolution / postmidsem / codes / main_folder / glass / main_bp_without_clustring.py View on Github
def main(seed=None, play = 0, NGEN = 40, MU = 4 * 10):
    random.seed(seed)
    # MU has to be a multiple of 4. period.
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values[1])
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    pop = toolbox.population(n=MU)
    #network_obj = Neterr(indim, outdim, n_hidden, np.random)
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))
    # print(pop)
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)
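Note that this snippet keeps "std" and "avg" in the header even though only "min" and "max" are registered; the unregistered columns simply come out blank in the printed stream. After the run, the same logbook can drive post-processing. A small sketch, assuming the loop that follows records one row per generation and that matplotlib is available:

import matplotlib.pyplot as plt

gens = logbook.select("gen")
fit_min = logbook.select("min")
fit_max = logbook.select("max")

plt.plot(gens, fit_min, label="min")
plt.plot(gens, fit_max, label="max")
plt.xlabel("Generation")
plt.ylabel("Fitness (second objective)")
plt.legend()
plt.show()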
github soravux / scoop / examples / deap_ga_evosn.py View on Github
def main():
    random.seed(64)

    population = toolbox.population(n=300)
    hof = tools.ParetoFront()
    
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)
    
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    
    CXPB, MUTPB, ADDPB, DELPB, NGEN = 0.5, 0.2, 0.01, 0.01, 40
    
    # Evaluate every individual
    fitnesses = toolbox.map(toolbox.evaluate, population)
    for ind, fit in zip(population, fitnesses):
        ind.fitness.values = fit
    
    hof.update(population)
    record = stats.compile(population)
    logbook.record(gen=0, evals=len(population), **record)
    print(logbook.stream)
    
    # Begin the evolution
    for g in range(1, NGEN):
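The excerpt stops at the top of the evolution loop. The original sorting-network example applies its own crossover and wire add/delete operators, presumably gated by CXPB, MUTPB, ADDPB and DELPB, which are not shown here. As a generic stand-in, assuming "from deap import algorithms" at the top of the file and a selection operator registered as toolbox.select, a loop body that logs one row per generation could look like this:

    for g in range(1, NGEN):
        # Variation: crossover and mutation on a cloned offspring population
        offspring = algorithms.varAnd(population, toolbox, cxpb=CXPB, mutpb=MUTPB)

        # Re-evaluate only the individuals invalidated by variation
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        population = toolbox.select(offspring, len(population))
        hof.update(population)

        record = stats.compile(population)
        logbook.record(gen=g, evals=len(invalid_ind), **record)
        print(logbook.stream)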
github nilinswap / neuro-evolution / main / codes / others / others / zdt1 / deap_nsga2.py View on Github
def main(seed=None):
	random.seed(seed)

	NGEN = 250
	MU = 100
	CXPB = 0.9

	stats = tools.Statistics(lambda ind: ind.fitness.values)
	stats.register("avg", numpy.mean, axis=0)
	stats.register("std", numpy.std, axis=0)
	stats.register("min", numpy.min, axis=0)
	stats.register("max", numpy.max, axis=0)
	
	logbook = tools.Logbook()
	logbook.header = "gen", "evals", "std", "min", "avg", "max"
	
	pop = toolbox.population(n=MU)

	# Evaluate the individuals with an invalid fitness
	invalid_ind = [ind for ind in pop if not ind.fitness.valid]
	fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
	for ind, fit in zip(invalid_ind, fitnesses):
		ind.fitness.values = fit

	# This is just to assign the crowding distance to the individuals
	# no actual selection is done
	pop = toolbox.select(pop, len(pop))
	
	record = stats.compile(pop)
	logbook.record(gen=0, evals=len(invalid_ind), **record)
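Only generation 0 is logged in the excerpt. A typical NSGA-II continuation pairs tools.selTournamentDCD, which requires the population size to be a multiple of four, with mate/mutate, then reselects MU survivors from parents plus offspring and records a row per generation. A sketch that roughly follows the standard DEAP nsga2 example:

	for gen in range(1, NGEN):
		# Binary tournament based on dominance and crowding distance
		offspring = tools.selTournamentDCD(pop, len(pop))
		offspring = [toolbox.clone(ind) for ind in offspring]

		for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
			if random.random() <= CXPB:
				toolbox.mate(ind1, ind2)
			toolbox.mutate(ind1)
			toolbox.mutate(ind2)
			del ind1.fitness.values, ind2.fitness.values

		# Evaluate the individuals with an invalid fitness
		invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
		fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
		for ind, fit in zip(invalid_ind, fitnesses):
			ind.fitness.values = fit

		# Environmental selection on parents plus offspring
		pop = toolbox.select(pop + offspring, MU)
		record = stats.compile(pop)
		logbook.record(gen=gen, evals=len(invalid_ind), **record)
		print(logbook.stream)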
github nilinswap / neuro-evolution / main2-iterative.py View on Github
def main(seed=None, play = 0, NGEN = 40, MU = 4 * 10):
    random.seed(seed)


    # MU has to be a multiple of 4. period.
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values[1])
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    pop = toolbox.population(n=MU)
    #network_obj = Neterr(indim, outdim, n_hidden, np.random)
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))
    # print(pop)
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)
github PyXRD / PyXRD / pyxrd / refinement / methods / deap_pso_cma.py View on Github
    def run(self):
        """Will run this algorithm"""
        if self.verbose:
            column_names = ["gen", "evals", "best"]
            if self.stats is not None:
                column_names += list(self.stats.functions.keys())
            self.logbook = tools.Logbook()
            self.logbook.header = column_names

        for _ in range(self.ngen):
            # Check if the user has cancelled:
            if self._user_cancelled():
                self.refiner.status.message = "Stopping..."
                logger.info("User cancelled execution, stopping ...")
                break

            #ASK: Generate a new population:
            swarms = self._ask()
            #TELL: Update the strategy with the evaluated individuals
            self._tell(swarms)
            #RECORD: For logging:
            self._record(swarms)
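The _ask, _tell and _record helpers belong to PyXRD's own strategy wrapper, not to DEAP. Purely for illustration, a hypothetical sketch of what a _record step in this style could look like, assuming the wrapper also keeps a generation counter and a hall of fame on self (these attribute names are invented, not PyXRD's actual API):

    def _record(self, swarms):
        # The logbook only exists in verbose mode, as in the run() method above
        if not self.verbose:
            return

        # Flatten the particle swarms into one population for the statistics
        population = [ind for swarm in swarms for ind in swarm]

        record = self.stats.compile(population) if self.stats is not None else {}

        # self.gen and self.halloffame are assumed to be maintained elsewhere
        self.gen += 1
        self.logbook.record(gen=self.gen, evals=len(population),
                            best=self.halloffame[0].fitness.values, **record)
        print(self.logbook.stream)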
github DEAP / deap / examples / ga / evosn.py View on Github
def main():
    random.seed(64)

    population = toolbox.population(n=300)
    hof = tools.ParetoFront()
    
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    
    CXPB, MUTPB, ADDPB, DELPB, NGEN = 0.5, 0.2, 0.01, 0.01, 40
    
    # Evaluate every individual
    fitnesses = toolbox.map(toolbox.evaluate, population)
    for ind, fit in zip(population, fitnesses):
        ind.fitness.values = fit
    
    hof.update(population)
    record = stats.compile(population)
    logbook.record(gen=0, evals=len(population), **record)
    print(logbook.stream)
    
    # Begin the evolution
    for g in range(1, NGEN):
github DEAP / deap / examples / ga / nsga3.py View on Github
def main(seed=None):
    random.seed(seed)

    # Initialize statistics object
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # Compile statistics about the population
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
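The generational process that follows this comment is structurally the same as the NSGA-II sketch earlier: vary the population, evaluate the invalidated individuals, select MU survivors from parents plus offspring, and record one logbook row per generation. One Logbook feature that pairs well with multi-objective runs like this, although the snippet does not use it, is MultiStatistics with chapters. A self-contained sketch; the stub classes only stand in for creator-built individuals:

import numpy
from deap import tools

class StubFitness:
    def __init__(self, values):
        self.values = values

class StubIndividual(list):
    def __init__(self, genome, fitness_values):
        super().__init__(genome)
        self.fitness = StubFitness(fitness_values)

pop = [StubIndividual([1, 2, 3], (0.5, 1.0)),
       StubIndividual([4, 5], (0.25, 2.0))]

# One Statistics object per logged quantity, grouped under a MultiStatistics
stats_fit = tools.Statistics(key=lambda ind: ind.fitness.values)
stats_size = tools.Statistics(key=len)
mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
mstats.register("avg", numpy.mean, axis=0)
mstats.register("min", numpy.min, axis=0)

logbook = tools.Logbook()
logbook.header = "gen", "fitness", "size"
logbook.chapters["fitness"].header = "avg", "min"
logbook.chapters["size"].header = "avg", "min"

record = mstats.compile(pop)        # {"fitness": {...}, "size": {...}}
logbook.record(gen=0, **record)     # nested dicts are routed into the chapters
print(logbook.stream)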
github nilinswap / neuro-evolution / main1_nll_mse_misc_com.py View on Github
def main(seed=None, play = 0, NGEN = 40, MU = 4 * 10):
	#random.seed(seed)


	  # MU has to be a multiple of 4. period.
	CXPB = 0.9
	
	stats = tools.Statistics(lambda ind: ind.fitness.values[1])
	# stats.register("avg", numpy.mean, axis=0)
	# stats.register("std", numpy.std, axis=0)
	stats.register("min", numpy.min, axis=0)
	stats.register("max", numpy.max, axis=0)

	logbook = tools.Logbook()
	logbook.header = "gen", "evals", "std", "min", "avg", "max"
	toolbox.register("evaluate", minimize_src)
	time1 = time.time()
	toolbox.register("mutate", mymutate_src)
	pop_src = toolbox.population(n=MU)
	time2 = time.time()
	print("After population initialisation", time2 - time1)
	print(type(pop_src))
	#print("population initialized")
	#network_obj = Neterr(indim, outdim, n_hidden, np.random)
	# Evaluate the individuals with an invalid fitness
	invalid_ind = [ind for ind in pop_src if not ind.fitness.valid]

	fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
	for ind, fit in zip(invalid_ind, fitnesses):
		ind.fitness.values = fit
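Once a run like this finishes (assuming the loop that follows records one row per generation, as in the earlier examples), the logbook is just a list of plain dictionaries, which makes it easy to persist or hand off to other tools. A short sketch; pandas is an optional extra here, not something these projects use:

import pickle
import pandas as pd

# The logbook contains only data, so the whole object pickles cleanly
with open("logbook.pkl", "wb") as handle:
    pickle.dump(logbook, handle)

# A list of dicts is exactly what the DataFrame constructor expects
df = pd.DataFrame(logbook)
print(df.head())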