How to use the `chaospy.Uniform` function

To help you get started, we’ve selected a few chaospy examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github simetenn / uncertainpy / tests / test_parameters.py View on Github external
def test_iter(self):
        """Iterating a Parameters collection yields one Parameter per entry."""
        specs = [
            ["gbar_Na", 120, cp.Uniform(110, 130)],
            ["gbar_K", 36, cp.Normal(36, 1)],
            ["gbar_L", 0.3, cp.Chi(1, 1, 0.3)],
        ]

        collected = list(Parameters(specs))

        self.assertEqual(len(collected), 3)
        for item in collected:
            self.assertIsInstance(item, Parameter)
github simetenn / uncertainpy / tests / test_parameters.py View on Github external
def test_setitem(self):
        """Assigning via parameters["name"] stores a retrievable Parameter."""
        parameters = Parameters()
        parameters["gbar_Na"] = Parameter("gbar_Na", 120, cp.Uniform(110, 130))

        self.assertTrue("gbar_Na" in parameters.parameters)
        self.assertIsInstance(parameters["gbar_Na"], Parameter)
github UCL-CCS / EasyVVUQ / tests / test_multisampler.py View on Github external
# Set up samplers
    # Fragment of a test function (the enclosing def is outside this view).
    # Two exhaustive parameter sweeps plus one random sampler are chained
    # into a single MultiSampler and attached to the campaign.
    sweep1 = {
        "angle": [0.1, 0.2, 0.3],
        "height": [2.0, 10.0],
        "velocity": [10.0, 10.1, 10.2]
    }
    sampler1 = uq.sampling.BasicSweep(sweep=sweep1)

    sweep2 = {
        "air_resistance": [0.2, 0.3, 0.4]
    }
    sampler2 = uq.sampling.BasicSweep(sweep=sweep2)

    vary = {
        # NOTE(review): chaospy's signature is Uniform(lower, upper); here
        # lower (9.8) > upper (1.0) — the bounds look reversed. TODO confirm.
        "gravity": cp.Uniform(9.8, 1.0),
        "mass": cp.Uniform(2.0, 10.0),
    }
    # Random sampler capped at 5 draws.
    sampler3 = uq.sampling.RandomSampler(vary=vary, max_num=5)

    # Make a multisampler
    multisampler = uq.sampling.MultiSampler(sampler1, sampler2, sampler3)

    # Set the campaign to use this sampler
    my_campaign.set_sampler(multisampler)

    # Test reloading
    # Round-trip the campaign state through JSON to check it can be restored.
    my_campaign.save_state(tmpdir + "test_multisampler.json")
    reloaded_campaign = uq.Campaign(state_file=tmpdir + "test_multisampler.json", work_dir=tmpdir)

    # Draw all samples
    my_campaign.draw_samples()
github UCL-CCS / EasyVVUQ / tests / test_multiapp.py View on Github external
"type": "float",
            "min": 0.0,
            "max": 1000.0,
            "default": 10.0}}

    encoder = uq.encoders.GenericEncoder(
        template_fname='tests/cannonsim/test_input/cannonsim.template',
        delimiter='#',
        target_filename='in.cannon')
    decoder = uq.decoders.SimpleCSV(
        target_filename='output.csv', output_columns=[
            'Dist', 'lastvx', 'lastvy'], header=0)
    collater = uq.collate.AggregateSamples(average=False)

    vary = {
        "gravity": cp.Uniform(9.8, 1.0),
        "mass": cp.Uniform(2.0, 10.0),
    }
    cannon_sampler = uq.sampling.RandomSampler(vary=vary, max_num=5)
    cannon_action = uq.actions.ExecuteLocal("tests/cannonsim/bin/cannonsim in.cannon output.csv")
    cannon_stats = uq.analysis.BasicStats(qoi_cols=['Dist', 'lastvx', 'lastvy'])

    return params, encoder, decoder, collater, cannon_sampler, cannon_action, cannon_stats
github simetenn / uncertainpy / tests / test_uncertainty_calculations.py View on Github external
def create_PCE_custom(self, uncertain_parameters=None, custom_argument=None):
            """Custom PCE factory used to exercise the pluggable-method hook.

            Builds a fixed two-variable polynomial surrogate for every
            feature so the surrounding test can verify that this callback
            (and its extra ``custom_argument``) is wired through correctly.
            """
            uncertain_parameters = self.convert_uncertain_parameters(uncertain_parameters)

            data = Data()

            # Two polynomial variables over a joint of two default Uniform
            # distributions (presumably Uniform(0, 1) — chaospy's default).
            q0, q1 = cp.variable(2)
            parameter_space = [cp.Uniform(), cp.Uniform()]
            distribution = cp.J(*parameter_space)

            data.uncertain_parameters = ["a", "b"]

            # Expose custom_argument on the data object so the caller can
            # assert that it arrived here.
            data.test_value = custom_argument
            data.add_features(["TestingModel1d", "feature0d", "feature1d", "feature2d"])

            # Identical polynomial vector [q0, q1*q0, q1] for every feature.
            U_hat = {}
            U_hat["TestingModel1d"] = cp.Poly([q0, q1*q0, q1])
            U_hat["feature0d"] = cp.Poly([q0, q1*q0, q1])
            U_hat["feature1d"] = cp.Poly([q0, q1*q0, q1])
            U_hat["feature2d"] = cp.Poly([q0, q1*q0, q1])

            return U_hat, distribution, data
github simetenn / uncertainpy / examples / hodgkin-huxley / hodgkin-huxley_uncertainty_estimation.py View on Github external
import uncertainpy
import chaospy as cp

from HodgkinHuxleyModel import HodgkinHuxleyModel


# Parameter definitions: [name, default value, prior distribution].
# V_rest is kept fixed (no distribution).
parameters = [["V_rest", -65, None],
              ["Cm", 1, cp.Uniform(0.8, 1.5)],
              ["gbar_Na", 120, cp.Uniform(80, 160)],
              ["gbar_K", 36, cp.Uniform(26, 49)],
              ["gbar_l", 0.3, cp.Uniform(0.13, 0.5)],
              ["E_Na", 50, cp.Uniform(30, 54)],
              # Fixed: chaospy.Uniform takes (lower, upper); the original had
              # the E_K bounds reversed as cp.Uniform(-74, -79).
              ["E_K", -77, cp.Uniform(-79, -74)],
              ["E_l", -50.613, cp.Uniform(-61, -43)]]


model = HodgkinHuxleyModel(parameters=parameters)
# Assigns a +/-20% uniform distribution across parameters.
# NOTE(review): this may override the explicit priors above — TODO confirm
# against uncertainpy's setAllDistributions semantics.
model.setAllDistributions(uncertainpy.Distribution(0.2).uniform)

# Run all available NEURON spiking features on the model output.
features = uncertainpy.NeuronFeatures(features_to_run="all")

exploration = uncertainpy.UncertaintyEstimation(model, features=features)

# Perform the uncertainty quantification.
exploration.UQ()
github jonathf / chaospy / docs / fig / tutorial_figures.py View on Github external
def plot_figures():
    """Plot figures for tutorial."""
    # Fixed seed so the tutorial figures are reproducible.
    numpy.random.seed(1000)


    def foo(coord, param):
        # Exponential-decay model: amplitude param[0], decay rate param[1].
        return param[0] * numpy.e ** (-param[1] * coord)

    coord = numpy.linspace(0, 10, 200)
    # Joint prior: amplitude ~ U(1, 2), decay rate ~ U(0.1, 0.2).
    distribution = cp.J(cp.Uniform(1, 2), cp.Uniform(0.1, 0.2))

    # 50 random draws; each column of `samples` is one (amplitude, rate) pair.
    samples = distribution.sample(50)
    evals = numpy.array([foo(coord, sample) for sample in samples.T])

    plt.plot(coord, evals.T, "k-", lw=3, alpha=0.2)
    # \verb labels require matplotlib's LaTeX rendering (text.usetex) enabled.
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.savefig("demonstration.png")
    plt.clf()


    # 1000 quasi-random samples (rule "H" — presumably Halton; TODO confirm)
    # used to estimate the mean and standard deviation pointwise over coord.
    samples = distribution.sample(1000, "H")
    evals = [foo(coord, sample) for sample in samples.T]
    expected = numpy.mean(evals, 0)
    deviation = numpy.std(evals, 0)
github simetenn / uncertainpy / src / uncertainpy / core / uncertainty_calculations.py View on Github external
uncertain_parameters = self.convert_uncertain_parameters(uncertain_parameters)

        # Fragment of a Monte-Carlo method (the enclosing def lies outside
        # this view).  Saltelli samples are drawn on the unit hypercube and
        # then mapped into the target parameter space.
        distribution = self.create_distribution(uncertain_parameters=uncertain_parameters)

        # nodes = distribution.sample(nr_samples, "M")

        # SALib problem definition over the unit hypercube [0, 1]^d.
        problem = {
            "num_vars": len(uncertain_parameters),
            "names": uncertain_parameters,
            "bounds": [[0,1]]*len(uncertain_parameters)
        }

        # Joint distribution of independent Uniform() marginals, used as the
        # reference space for the transform below.
        dist_R = []
        for parameter in uncertain_parameters:
            dist_R.append(cp.Uniform())

        dist_R = cp.J(*dist_R)

        nr_sobol_samples = int(np.round(nr_samples/2.))

        # Saltelli sampling, first-order indices only.
        nodes_R = saltelli.sample(problem, nr_sobol_samples, calc_second_order=False)

        # Map the uniform samples into the target distribution's space via
        # inverse-CDF of the forward-CDF values.
        nodes = distribution.inv(dist_R.fwd(nodes_R.transpose()))


        data = self.runmodel.run(nodes, uncertain_parameters)

        data.method = "monte carlo method. nr_samples={}".format(nr_samples)
        data.seed = seed

        logger = get_logger(self)
github simetenn / uncertainpy / examples / coffee_cup_dependent / uq_coffee_dependent_function.py View on Github external
# Fragment: tail of the coffee_cup_dependent model (Newton cooling with a
# dependent parameter); the enclosing function signatures lie outside this view.
return -alpha*kappa_hat*(T - T_env)

    # Solving the equation by integration.
    # odeint returns shape (len(time), 1); [:, 0] flattens to a 1-D series.
    temperature = odeint(f, T_0, time, args=(alpha, kappa_hat, T_env))[:, 0]

    # Return time and model results
    return time, temperature


# Wrap the coffee_cup_dependent function as an Uncertainpy model, with axis
# labels for the generated plots.
model = un.Model(coffee_cup_dependent, labels=["Time (s)", "Temperature (C)"])

# Priors for the uncertain parameters.  kappa_hat is defined in terms of
# alpha, which makes the two parameters statistically dependent.
alpha_dist = cp.Uniform(0.5, 1.5)
T_env_dist = cp.Uniform(15, 25)
kappa_hat_dist = cp.Uniform(0.025, 0.075)/alpha_dist

# Map each parameter name to its distribution.
parameters = dict(alpha=alpha_dist,
                  kappa_hat=kappa_hat_dist,
                  T_env=T_env_dist)

# The parameters dictionary can be passed straight to the UQ setup.
UQ = un.UncertaintyQuantification(model=model, parameters=parameters)

# Quantify the uncertainty; the Rosenblatt transformation is used
# automatically since the parameters are dependent.
data = UQ.quantify()
github simetenn / uncertainpy / examples / bahl / uq_bahl.py View on Github external
def set_parameters(self, parameters):
        """Assign each parameter value in the hoc interpreter namespace.

        Afterwards, re-run the model's recalculation routines so derived
        quantities reflect the newly assigned values.
        """
        for name, value in parameters.items():
            self.h(name + " = " + str(value))

        # These commands must be added for this specific
        # model to recalculate the parameters after they have been set
        self.h("recalculate_passive_properties()")
        self.h("recalculate_channel_densities()")


# Initialize the model with the start and end time of the stimulus
model = NeuronModelBahl(stimulus_start=100, stimulus_end=600)

# Define a parameter list: [name, default value, prior distribution].
# Fixed: chaospy.Uniform takes (lower, upper); the original reversed the
# e_pas bounds as cp.Uniform(-60, -85).
parameter_list = [["e_pas", -80, cp.Uniform(-85, -60)],
                  ["apical Ra", 261, cp.Uniform(150, 300)]]

# Initialize the spiking features to evaluate on the model output
features = un.SpikingFeatures()

# Perform the uncertainty quantification
UQ = un.UncertaintyQuantification(model=model,
                                  parameters=parameter_list,
                                  features=features)
UQ.quantify()