How to use the deap.creator.FitnessMin class in deap

To help you get started, we’ve selected a few deap examples based on popular ways creator.FitnessMin is used in public projects.

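Before diving into the excerpts, here is a minimal, self-contained sketch of the usual pattern (it is not taken from any of the projects below, and the attribute and function names are only illustrative): creator.create builds the FitnessMin class from base.Fitness at runtime, a weight of -1.0 marks the objective as one to minimize, and evaluation functions must return a tuple with one value per weight.

import random

from deap import base
from deap import creator
from deap import tools

# FitnessMin is created at runtime; a weight of -1.0 means "minimize".
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("attr_float", random.uniform, -5.0, 5.0)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_float, 3)

def evaluate(individual):
    # Return a tuple whose length matches the weights tuple.
    return sum(x * x for x in individual),

ind = toolbox.individual()
ind.fitness.values = evaluate(ind)
print(ind, ind.fitness.values)

For multi-objective minimization the same call simply takes more weights, e.g. weights=(-1.0, -1.0), as several of the examples below do.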

GitHub: DEAP / deap / examples / es / cma_plotting.py
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import cma
from deap import creator
from deap import tools

import numpy
import matplotlib.pyplot as plt

# Problem size
N = 10
NGEN = 125

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.rastrigin)

def main(verbose=True):
    # The cma module uses the numpy random number generator
    numpy.random.seed(64)

    # The CMA-ES algorithm takes a population of one individual as argument
    # The centroid is set to a vector of 5.0; see http://www.lri.fr/~hansen/cmaes_inmatlab.html
    # for more details about the Rastrigin and other test functions for CMA-ES.
    strategy = cma.Strategy(centroid=[5.0]*N, sigma=5.0, lambda_=20*N)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    halloffame = tools.HallOfFame(1)
GitHub: nilinswap / neuro-evolution / main1_nll_mse_misc_com.py
import os
import random
import traceback

# base, creator and the network module are used further down in this excerpt
from deap import base
from deap import creator

from population import *
import network
from network import Neterr
from chromosome import Chromosome, crossover
n_hidden = 100
indim = 32
outdim = 5
#
network_obj_src = Neterr(indim, outdim, n_hidden, change_to_target = 0, rng = random)

network_obj_tar = Neterr(indim, outdim, n_hidden, change_to_target = 1, rng = random)
#creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0, 0.0, 0.0))

creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0, -1.0, -1.0))
creator.create("Individual", Chromosome, fitness=creator.FitnessMin)
print("here network object created")
toolbox = base.Toolbox()

def minimize_src(individual):
	outputarr = network_obj_src.feedforward_ne(individual, final_activation=network.softmax)

	neg_log_likelihood_val = give_neg_log_likelihood(outputarr, network_obj_src.resty)
	mean_square_error_val = give_mse(outputarr, network_obj_src.resty)
	mis_error = find_misclas_error(outputarr, network_obj_src.resty)
	complexity = lambda ind: len(ind.conn_arr)
	ind_complexity = complexity(individual)
	# anyways not using these as you can see in 'creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0, 0.0, 0.0))'
	# return neg_log_likelihood_val, mean_square_error_val, false_positve_rat, false_negative_rat
	return (neg_log_likelihood_val, mean_square_error_val, mis_error, ind_complexity)

def minimize_tar(individual):
GitHub: nilinswap / neuro-evolution / main_bp_without_clustring.py
from deap import base
from deap import benchmarks
from deap.benchmarks.tools import diversity, convergence
from deap import creator
from deap import tools
import os

import numpy as np  # np.random is used below; the original may also get np via population's star import

from population import *
from network import Neterr
from chromosome import Chromosome, crossover

n_hidden = 100
indim = 8
outdim = 2
network_obj = Neterr(indim, outdim, n_hidden, np.random)
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0, 0.0, 0.0))
creator.create("Individual", Chromosome, fitness=creator.FitnessMin)

toolbox = base.Toolbox()


def minimize(individual):
    outputarr = network_obj.feedforward_ne(individual)

    neg_log_likelihood_val = give_neg_log_likelihood(outputarr, network_obj.resty)
    mean_square_error_val = give_mse(outputarr, network_obj.resty)
    false_positve_rat = give_false_positive_ratio(outputarr, network_obj.resty)
    false_negative_rat = give_false_negative_ratio(outputarr, network_obj.resty)

    return neg_log_likelihood_val, mean_square_error_val, false_positve_rat, false_negative_rat


def mycross(ind1, ind2, gen_no):
GitHub: DEAP / deap / examples / coev / hillis.py
        # excerpt begins mid-function: insert a new wire at a random position
        index = random.randint(0, len(individual))
        individual.insert(index, genWire(dimension))
    if random.random() < delpb:
        index = random.randrange(len(individual))
        del individual[index]
    return individual,

def mutParasite(individual, indmut, indpb):
    for i in individual:
        if random.random() < indpb:
            indmut(i)
    return individual,

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Host", list, fitness=creator.FitnessMin)
creator.create("Parasite", list, fitness=creator.FitnessMax)

htoolbox = base.Toolbox()
ptoolbox = base.Toolbox()

htoolbox.register("network", genNetwork, dimension=INPUTS, min_size=9, max_size=12)
htoolbox.register("individual", tools.initIterate, creator.Host, htoolbox.network)
htoolbox.register("population", tools.initRepeat, list, htoolbox.individual)

ptoolbox.register("parasite", getParasite, dimension=INPUTS)
ptoolbox.register("individual", tools.initRepeat, creator.Parasite, ptoolbox.parasite, 20)
ptoolbox.register("population", tools.initRepeat, list, ptoolbox.individual)

htoolbox.register("evaluate", evalNetwork, dimension=INPUTS)
htoolbox.register("mate", tools.cxTwoPoint)
htoolbox.register("mutate", mutNetwork, dimension=INPUTS, mutpb=0.2, addpb=0.01,
GitHub: DEAP / deap / doc / code / tutorials / part_2 / 2_2_3_arithmetic_expression.py
## 2.2.3 Arithmetic expression
import operator

from deap import base
from deap import creator
from deap import gp
from deap import tools

pset = gp.PrimitiveSet("MAIN", arity=1)
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.sub, 2)
pset.addPrimitive(operator.mul, 2)

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin,
               pset=pset)

toolbox = base.Toolbox()
toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=2)
toolbox.register("individual", tools.initIterate, creator.Individual,
                 toolbox.expr)
GitHub: EpistasisLab / pennai / learn / deapGP / main.py
    numpy.random.seed(random_state)
    pset = gp.PrimitiveSet("MAIN", 1)
    pset.addPrimitive(operator.add, 2)
    pset.addPrimitive(operator.sub, 2)
    pset.addPrimitive(operator.mul, 2)
    pset.addPrimitive(protectedDiv, 2)
    pset.addPrimitive(operator.neg, 1)
    pset.addPrimitive(math.cos, 1)
    pset.addPrimitive(math.sin, 1)
    # use a run-specific name for the ephemeral constant (DEAP raises an error
    # if the same name is registered again with a different function)
    pset.addEphemeralConstant(
        Ephe_Cont_Name, lambda: numpy.random.randint(-1, 2))
    pset.renameArguments(ARG0='x')

    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()
    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=2)
    toolbox.register("individual", tools.initIterate,
                     creator.Individual, toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("compile", gp.compile, pset=pset)

    def evalSymbReg(individual, points):
        # Transform the tree expression in a callable function
        func = toolbox.compile(expr=individual)
        # Evaluate the mean squared error between the expression
        # and the real function : x**4 + x**3 + x**2 + x
        sqerrors = ((func(x) - x**4 - x**3 - x**2 - x)**2 for x in points)
        return math.fsum(sqerrors) / len(points),
GitHub: DEAP / deap / examples / ga / ga_tsp.py
import array
import random

from deap import base
from deap import creator
from deap import tools

tsp = False

if tsp:
    IND_SIZE = tsp["TourSize"]
    distance_map = tsp["DistanceMatrix"]
else:
    IND_SIZE = 25
    distance_map = [[0]*IND_SIZE for _ in range(IND_SIZE)]
    for i in range(IND_SIZE):
        for j in range(IND_SIZE):
            d = random.random()
            distance_map[i][j] = d
            distance_map[j][i] = d

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin)

toolbox = base.Toolbox()

# Attribute generator
toolbox.register("indices", random.sample, xrange(IND_SIZE), IND_SIZE)

# Structure initializers
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.indices)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

def evalTSP(individual):
    distance = distance_map[individual[-1]][individual[0]]
    for gene1, gene2 in zip(individual[0:-1], individual[1:]):
        distance += distance_map[gene1][gene2]
    return distance,
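The excerpt stops before the evaluator is registered. In the full example evalTSP is wired into the toolbox, and the tuple it returns is what ends up in the single-weight FitnessMin; a sketch of that last step (the manual assignment below is only illustrative, the bundled algorithms normally do it for you):

toolbox.register("evaluate", evalTSP)

ind = toolbox.individual()
ind.fitness.values = toolbox.evaluate(ind)  # tuple length must match the weights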
GitHub: nilinswap / neuro-evolution / postmidsem / codes / main_folder / glass / main_bp_without_clustring.py
from deap import base
from deap import benchmarks
from deap.benchmarks.tools import diversity, convergence
from deap import creator
from deap import tools
import os

import numpy as np  # np.random is used below; the original may also get np via population's star import

from population import *
from network import Neterr
from chromosome import Chromosome, crossover

n_hidden = 100
indim = 9
outdim = 7
network_obj = Neterr(indim, outdim, n_hidden, np.random)
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0, 0.0, 0.0))
creator.create("Individual", Chromosome, fitness=creator.FitnessMin)

toolbox = base.Toolbox()


def minimize(individual):
    outputarr = network_obj.feedforward_ne(individual)

    neg_log_likelihood_val = give_neg_log_likelihood(outputarr, network_obj.resty)
    mean_square_error_val = give_mse(outputarr, network_obj.resty)
    false_positve_rat = give_false_positive_ratio(outputarr, network_obj.resty)
    false_negative_rat = give_false_negative_ratio(outputarr, network_obj.resty)

    return neg_log_likelihood_val, mean_square_error_val, false_positve_rat, false_negative_rat


def mycross(ind1, ind2, gen_no):
GitHub: PyXRD / PyXRD / mixture / models / methods / deap_gen.py
def run(self, context, ngen=NGEN, stagn_ngen=STAGN_NGEN, stagn_tol=STAGN_TOL,
            factr_lambda=FACTR_LAMBDA, factr_init_lambda=FACTR_INIT_LAMBDA,
            max_init_lambda=MAX_INIT_LAMBDA, min_init_lambda=MIN_INIT_LAMBDA, **kwargs):

        N = len(context.ref_props)
        init_lambda = max(min(N * factr_init_lambda, max_init_lambda), min_init_lambda)
        lambda_ = min(N * factr_lambda, MAX_LAMBDA)

        # Individual generation:
        bounds = np.array(context.ranges)
        creator.create(
            "Individual", pyxrd_array,
            fitness=creator.FitnessMin,
            context=context,
            min_bounds=bounds[:, 0].copy(),
            max_bounds=bounds[:, 1].copy(),
        )

        # Makes sure individuals stay in-bound:
        def create_individual(lst):
            arr = np.array(lst).clip(bounds[:, 0], bounds[:, 1])
            return creator.Individual(arr)

        # Toolbox setup:
        toolbox = base.Toolbox()
        toolbox.register("evaluate", evaluate)

        # Setup strategy:
        strategy = CustomStrategy(centroid=context.initial_solution, sigma=2, lambda_=lambda_)
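Note that this excerpt only builds the Individual class; it references creator.FitnessMin without creating it, so it assumes an earlier creator.create call elsewhere in the module, typically with a single minimizing objective, for example:

# assumed to have run before Individual is created (not shown in the excerpt above)
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))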
GitHub: DEAP / deap / examples / ga / kursawefct.py
#    License along with DEAP. If not, see <http://www.gnu.org/licenses/>.

import array
import logging
import random

import numpy

from deap import algorithms
from deap import base
from deap import benchmarks
from deap import creator
from deap import tools

creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)

toolbox = base.Toolbox()

# Attribute generator
toolbox.register("attr_float", random.uniform, -5, 5)

# Structure initializers
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, 3)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

def checkBounds(min, max):
    def decorator(func):
        def wrappper(*args, **kargs):
            offspring = func(*args, **kargs)
            for child in offspring:
                for i in range(len(child)):