How to use the deephyper.benchmarks.keras_cmdline.fill_missing_defaults function in deephyper

To help you get started, we've selected a few deephyper examples that show how fill_missing_defaults is commonly used in public projects.

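Before the full benchmark scripts below, here is a minimal sketch of the pattern they all share. Only keras_cmdline.fill_missing_defaults itself is the deephyper API named on this page; the augment_parser shown here, its arguments, and their defaults are hypothetical placeholders for whatever a given benchmark defines.

import argparse
from pprint import pprint

from deephyper.benchmarks import keras_cmdline

def augment_parser(parser):
    # Hypothetical benchmark-specific hyperparameters and their defaults.
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=64)
    return parser

# A partial point coming from the search: only batch_size is set.
param_dict = {'batch_size': 128}

# Fill every missing key with the default registered by augment_parser.
param_dict = keras_cmdline.fill_missing_defaults(augment_parser, param_dict)
pprint(param_dict)  # batch_size stays 128; the other keys fall back to their defaults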

Example from deephyper/deephyper: deephyper/benchmarks/dummy2/regression.py
def run(param_dict):
    # Fill in any hyperparameters the search left unset, using the defaults
    # declared by this benchmark's augment_parser.
    param_dict = keras_cmdline.fill_missing_defaults(augment_parser, param_dict)
    pprint(param_dict)
    
    timer.start('stage in')
    if param_dict['data_source']:
        data_source = param_dict['data_source']
    else:
        data_source = os.path.dirname(os.path.abspath(__file__))
        data_source = os.path.join(data_source, 'data')

    # Stage the dataset in to the requested destination and get its local path.
    paths = util.stage_in(['dataset'], source=data_source, dest=param_dict['stage_in_destination'])
    path = paths['dataset']
    
    data = np.loadtxt(path)
    training_x = data[:,0]
    training_y = data[:,1]
    n_pt = len(training_x)

Example from deephyper/deephyper: benchmarks/cifar10cnn/cifar10_cnn.py
def run(param_dict):
    param_dict = keras_cmdline.fill_missing_defaults(augment_parser, param_dict)
    # Build the Keras optimizer selected by the hyperparameters.
    optimizer = keras_cmdline.return_optimizer(param_dict)
    pprint(param_dict)
    start_time = time.time()
    timer.start('stage in')
    if param_dict['data_source']:
        data_source = param_dict['data_source']
    else:
        data_source = os.path.dirname(os.path.abspath(__file__))
        data_source = os.path.join(data_source, 'data')

    (x_train, y_train), (x_test, y_test) = load_data(
        origin=os.path.join(data_source, 'cifar-10-python.tar.gz'),
        dest=param_dict['stage_in_destination'],
    )

    timer.end()

Example from deephyper/deephyper: deephyper/benchmarks/b3/babi_rnn.py
def run(param_dict):
    param_dict = keras_cmdline.fill_missing_defaults(augment_parser, param_dict)
    optimizer = keras_cmdline.return_optimizer(param_dict)
    pprint(param_dict)

    BATCH_SIZE = param_dict['batch_size']
    EPOCHS = param_dict['epochs']
    DROPOUT = param_dict['dropout']
    ACTIVATION = param_dict['activation']
    TIMEOUT = param_dict['timeout']
    
    # Select the recurrent layer class requested by the hyperparameters.
    if param_dict['rnn_type'] == 'GRU':
        RNN = layers.GRU
    elif param_dict['rnn_type'] == 'SimpleRNN':
        RNN = layers.SimpleRNN
    else:
        RNN = layers.LSTM

Example from deephyper/deephyper: deephyper/benchmarks/mnistcnn/mnistcnn.py
def run(param_dict):
    param_dict = keras_cmdline.fill_missing_defaults(augment_parser, param_dict)
    optimizer = keras_cmdline.return_optimizer(param_dict)
    pprint(param_dict)
    
    timer.start('stage in')
    if param_dict['data_source']:
        data_source = param_dict['data_source']
    else:
        data_source = os.path.dirname(os.path.abspath(__file__))
        data_source = os.path.join(data_source, 'data')

    (x_train, y_train), (x_test, y_test) = load_data(
        origin=os.path.join(data_source, 'mnist.npz'),
        dest=param_dict['stage_in_destination'],
    )

    timer.end()

Example from deephyper/deephyper: benchmarks/b2/babi_memnn.py
def run(param_dict):
    param_dict = keras_cmdline.fill_missing_defaults(augment_parser, param_dict)
    optimizer = keras_cmdline.return_optimizer(param_dict)
    pprint(param_dict)
    start_time = time.time()
    challenges = {
        # QA1 with 10,000 samples
        'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
        # QA2 with 10,000 samples
        'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
    }
    challenge_type = 'single_supporting_fact_10k'
    challenge = challenges[challenge_type]
    
    timer.start('stage in')
    if param_dict['data_source']:
        data_source = param_dict['data_source']
    else: