How to use the pyglm.utils.io.segment_data function in pyglm

To help you get started, we've selected a few pyglm examples based on popular ways the function is used in public projects.
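Before the full project examples, here is a minimal sketch of the basic call pattern, inferred from the snippets below rather than from pyglm's documentation: segment_data takes a data dictionary and a (start, stop) time window in the same units as data['T'], and returns the data restricted to that window. The file path is a placeholder.

import cPickle  # Python 2; on Python 3 use the pickle module

from pyglm.utils.io import segment_data

# Load a previously saved pyglm data dict (placeholder path).
with open('data.pkl', 'rb') as f:
    data = cPickle.load(f)

# Hold out the last 25% of the recording for cross validation.
T_split = 0.75 * data['T']
train_data = segment_data(data, (0, T_split))
xv_data = segment_data(data, (T_split, data['T']))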


github slinderman / theano_pyglm / test / synth_map_with_xv.py (view on GitHub)
import numpy as np

from pyglm.utils.io import segment_data

# initialize_test_harness, make_model, and get_xv_models are helper
# functions defined elsewhere in the theano_pyglm test suite.
def run_synth_test():
    """ Run a test with synthetic data and MAP inference with cross validation
    """
    options, popn, data, popn_true, x_true = initialize_test_harness()
    
    # Get the list of models for cross validation
    base_model = make_model(options.model, N=data['N'], dt=0.001)
    models = get_xv_models(base_model)

    # Segment data into training and cross validation sets
    train_frac = 0.75
    T_split = data['T'] * train_frac
    train_data = segment_data(data, (0, T_split))
    xv_data = segment_data(data, (T_split, data['T']))

    # Preprocess the data sequences
    train_data = popn.preprocess_data(train_data)
    xv_data = popn.preprocess_data(xv_data)

    # Sample random initial state
    x0 = popn.sample()

    # Track the best model and parameters
    best_ind = -1
    best_xv_ll = -np.inf
    best_x = x0
    best_model = None

    # Fit each model using the optimum of the previous models
    train_lls = np.zeros(len(models))
github slinderman / theano_pyglm / test / parallel_map_with_xv.py (view on GitHub)
import time

import numpy as np

from pyglm.utils.io import segment_data

# initialize_parallel_test_harness, make_model, and get_xv_models are
# helper functions defined elsewhere in the theano_pyglm test suite.
def run_parallel_map():
    """ Run a test with synthetic data and MCMC inference
    """
    options, popn, data, client, popn_true, x_true = initialize_parallel_test_harness()

    # Get the list of models for cross validation
    base_model = make_model(options.model, N=data['N'])
    models = get_xv_models(base_model)

    # Segment data into training and cross validation sets
    train_frac = 0.75
    T_split = data['T'] * train_frac
    train_data = segment_data(data, (0, T_split))
    xv_data = segment_data(data, (T_split, data['T']))

    # Sample random initial state
    x0 = popn.sample(None)

    # Track the best model and parameters
    best_ind = -1
    best_xv_ll = -np.inf
    best_x = x0
    best_model = None

    use_existing = False

    start_time = time.time()  # time.clock() in the original; removed in Python 3.8

    # Fit each model using the optimum of the previous models
    train_lls = np.zeros(len(models))
github slinderman / theano_pyglm / scripts / split_data.py (view on GitHub)
import cPickle  # Python 2; on Python 3 use the pickle module
import os

from pyglm.utils.io import segment_data


data_dir = '/Users/scott/Projects/pyglm/data/synth/dist/N16T300/2014_07_22-10_01/'

# Pickle files should be opened in binary mode.
with open(os.path.join(data_dir, 'data.pkl'), 'rb') as f:
    data = cPickle.load(f)

# Hold out the last 60 time units as a test set.
data_test = segment_data(data, (240, 300))
with open(os.path.join(data_dir, 'data_test.pkl'), 'wb') as f:
    cPickle.dump(data_test, f, protocol=-1)

# Save nested training sets of increasing length.
ts = [15, 30, 60, 120, 180, 240]
datas = []
for t in ts:
    datas.append(segment_data(data, (0, t)))

for (t, d) in zip(ts, datas):
    with open(os.path.join(data_dir, 'data_%d.pkl' % t), 'wb') as f:
        cPickle.dump(d, f, protocol=-1)