# Web-scraper artifact (ad banner), not part of the original source:
# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# --- Fragment: reutersmlp benchmark runner (truncated) ---
# NOTE(review): indentation was lost in this paste -- the lines below are
# logically the body of run() but sit at column 0, and the remainder of the
# function (model build / fit / evaluation) is missing from this view.
# Restore against the original repository before executing.
def run(param_dict=None, verbose=2):
"""Run a param_dict on the reutersmlp benchmark."""
# Read in values from CLI if no param dict was specified and clean up the param dict.
param_dict = util.handle_cli(param_dict, build_parser())
# Display the parsed param dict.
if verbose:
print("PARAM_DICT_CLEAN=")
pprint(param_dict)
# Get values from param_dict.
# Hyperparameters
# NOTE(review): get_activation_instance presumably maps a name + alpha
# (e.g. for LeakyReLU/ELU) to a Keras activation -- confirm in util.
ACTIVATION1 = util.get_activation_instance(param_dict["activation1"], param_dict["alpha1"])
ACTIVATION2 = util.get_activation_instance(param_dict["activation2"], param_dict["alpha2"])
ACTIVATION3 = util.get_activation_instance(param_dict["activation3"], param_dict["alpha3"])
ACTIVATION4 = util.get_activation_instance(param_dict["activation4"], param_dict["alpha4"])
ACTIVATION5 = util.get_activation_instance(param_dict["activation5"], param_dict["alpha5"])
BATCH_SIZE = param_dict["batch_size"]
DROPOUT = param_dict["dropout"]
EPOCHS = param_dict["epochs"]
MAX_WORDS = param_dict["max_words"]
NHIDDEN = param_dict['nhidden']
NUNITS = param_dict["nunits"]
OPTIMIZER = util.get_optimizer_instance(param_dict)
SKIP_TOP = param_dict["skip_top"]
# Other
model_path = param_dict["model_path"]
# --- Fragment: accuracy metric (enclosing `def` is missing from this paste) ---
# NOTE(review): `y_true`, `y_pred`, and `K` (Keras backend) are not defined
# in this view; this looks like the body of the Keras siamese-network
# `accuracy(y_true, y_pred)` helper -- confirm against the original file.
'''Compute classification accuracy with a fixed threshold on distances.
'''
# A predicted distance < 0.5 is treated as the positive ("same class")
# label; the cast converts the boolean to y_true's dtype before comparing.
return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))
# --- Fragment: MNIST benchmark body (function header missing from paste) ---
# Load the raw MNIST digit images/labels.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# `timer` is a project-level profiling helper; this closes a phase that was
# started outside this fragment.
timer.end()
num_classes = 10
# Hyperparameters pulled from the (already cleaned) param dict.
BATCH_SIZE = param_dict['batch_size']
EPOCHS = param_dict['epochs']
DROPOUT = param_dict['dropout']
ACTIVATION1 = util.get_activation_instance(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = util.get_activation_instance(param_dict['activation2'], param_dict['alpha2'])
ACTIVATION3 = util.get_activation_instance(param_dict['activation3'], param_dict['alpha3'])
UNITS = param_dict['units']
OPTIMIZER = util.get_optimizer_instance(param_dict)
# Early-stop once val_acc plateaus; patience is half the epoch budget.
patience = math.ceil(EPOCHS/2)
callbacks = [
EarlyStopping(monitor="val_acc", min_delta=0.0001, patience=patience, verbose=verbose, mode="auto"),
TerminateOnNaN()]
timer.start('preprocessing')
# Scale pixel intensities from [0, 255] into [0, 1] as float32.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
input_shape = x_train.shape[1:]
# Per-class sample indices -- used to build positive/negative pairs, which
# suggests this fragment is from the siamese-MNIST benchmark (confirm).
digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]
"""Run a param_dict on the MNISTCNN benchmark."""
# Read in values from CLI if no param_dict was specified and clean up the param dict.
param_dict = util.handle_cli(param_dict, build_parser())
# Display the filled in param dict.
if verbose:
print("PARAM_DICT_CLEAN=")
pprint(param_dict)
# Get values from param_dict.
# Hyperparameters
ACTIVATION1 = util.get_activation_instance(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = util.get_activation_instance(param_dict['activation2'], param_dict['alpha2'])
ACTIVATION3 = util.get_activation_instance(param_dict['activation3'], param_dict['alpha3'])
ACTIVATION4 = util.get_activation_instance(param_dict['activation4'], param_dict['alpha4'])
ACTIVATION5 = util.get_activation_instance(param_dict['activation5'], param_dict['alpha5'])
BATCH_SIZE = param_dict["batch_size"]
DROPOUT = param_dict["dropout"]
EPOCHS = param_dict["epochs"]
F1_SIZE = param_dict["f1_size"]
F2_SIZE = param_dict["f2_size"]
F1_UNITS = param_dict["f1_units"]
F2_UNITS = param_dict["f2_units"]
MAX_POOL = param_dict["max_pool"]
NUNITS = param_dict["nunits"]
OPTIMIZER = util.get_optimizer_instance(param_dict)
PADDING_C1 = param_dict["padding_c1"]
PADDING_C2 = param_dict["padding_c2"]
PADDING_P1 = param_dict["padding_p1"]
PADDING_P2 = param_dict["padding_p2"]
P_SIZE = param_dict["p_size"]
"""Run a param_dict on the cifar10 benchmark."""
# Read in values from CLI if no param dict was specified and clean up the param dict.
param_dict = util.handle_cli(param_dict, build_parser())
# Display the parsed param dict.
if verbose:
print("PARAM_DICT_CLEAN=")
pprint(param_dict)
# Get values from param_dict.
# Hyperparameters
ACTIVATION1 = util.get_activation_instance(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = util.get_activation_instance(param_dict['activation2'], param_dict['alpha2'])
ACTIVATION3 = util.get_activation_instance(param_dict['activation3'], param_dict['alpha3'])
ACTIVATION4 = util.get_activation_instance(param_dict['activation4'], param_dict['alpha4'])
ACTIVATION5 = util.get_activation_instance(param_dict['activation5'], param_dict['alpha5'])
BATCH_SIZE = param_dict["batch_size"]
DATA_AUGMENTATION = param_dict["data_augmentation"]
DROPOUT = param_dict["dropout"]
EPOCHS = param_dict["epochs"]
F1_SIZE = param_dict["f1_size"]
F2_SIZE = param_dict["f2_size"]
F1_UNITS = param_dict["f1_units"]
F2_UNITS = param_dict["f2_units"]
NUNITS = param_dict["nunits"]
OPTIMIZER = util.get_optimizer_instance(param_dict)
P_SIZE = param_dict["p_size"]
PADDING_C1 = param_dict["padding_c1"]
PADDING_C2 = param_dict["padding_c2"]
PADDING_P1 = param_dict["padding_p1"]
PADDING_P2 = param_dict["padding_p2"]
STRIDE1 = param_dict["stride1"]
# --- Fragment: cifar10 benchmark runner, second copy (truncated) ---
# NOTE(review): near-duplicate of the preceding cifar10 fragment; this one
# keeps its `def` header but is cut off after PADDING_C2. Indentation of
# the body was lost (all lines at column 0).
def run(param_dict=None, verbose=2):
"""Run a param_dict on the cifar10 benchmark."""
# Read in values from CLI if no param dict was specified and clean up the param dict.
param_dict = util.handle_cli(param_dict, build_parser())
# Display the parsed param dict.
if verbose:
print("PARAM_DICT_CLEAN=")
pprint(param_dict)
# Get values from param_dict.
# Hyperparameters
ACTIVATION1 = util.get_activation_instance(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = util.get_activation_instance(param_dict['activation2'], param_dict['alpha2'])
ACTIVATION3 = util.get_activation_instance(param_dict['activation3'], param_dict['alpha3'])
ACTIVATION4 = util.get_activation_instance(param_dict['activation4'], param_dict['alpha4'])
ACTIVATION5 = util.get_activation_instance(param_dict['activation5'], param_dict['alpha5'])
BATCH_SIZE = param_dict["batch_size"]
DATA_AUGMENTATION = param_dict["data_augmentation"]
DROPOUT = param_dict["dropout"]
EPOCHS = param_dict["epochs"]
F1_SIZE = param_dict["f1_size"]
F2_SIZE = param_dict["f2_size"]
F1_UNITS = param_dict["f1_units"]
F2_UNITS = param_dict["f2_units"]
NUNITS = param_dict["nunits"]
OPTIMIZER = util.get_optimizer_instance(param_dict)
P_SIZE = param_dict["p_size"]
PADDING_C1 = param_dict["padding_c1"]
PADDING_C2 = param_dict["padding_c2"]
# --- Fragment: reutersmlp benchmark runner, second copy (truncated) ---
# NOTE(review): the `callbacks = [` list literal below is never closed in
# this paste -- the EarlyStopping/TerminateOnNaN entries seen in sibling
# fragments are missing here. Body indentation was lost (column 0).
def run(param_dict=None, verbose=2):
"""Run a param_dict on the reutersmlp benchmark."""
# Read in values from CLI if no param dict was specified and clean up the param dict.
param_dict = util.handle_cli(param_dict, build_parser())
# Display the parsed param dict.
if verbose:
print("PARAM_DICT_CLEAN=")
pprint(param_dict)
# Get values from param_dict.
# Hyperparameters
ACTIVATION1 = util.get_activation_instance(param_dict["activation1"], param_dict["alpha1"])
ACTIVATION2 = util.get_activation_instance(param_dict["activation2"], param_dict["alpha2"])
ACTIVATION3 = util.get_activation_instance(param_dict["activation3"], param_dict["alpha3"])
ACTIVATION4 = util.get_activation_instance(param_dict["activation4"], param_dict["alpha4"])
ACTIVATION5 = util.get_activation_instance(param_dict["activation5"], param_dict["alpha5"])
BATCH_SIZE = param_dict["batch_size"]
DROPOUT = param_dict["dropout"]
EPOCHS = param_dict["epochs"]
MAX_WORDS = param_dict["max_words"]
NHIDDEN = param_dict['nhidden']
NUNITS = param_dict["nunits"]
OPTIMIZER = util.get_optimizer_instance(param_dict)
SKIP_TOP = param_dict["skip_top"]
# Other
model_path = param_dict["model_path"]
# Constants
# Early-stopping patience: half the epoch budget, rounded up.
patience = math.ceil(EPOCHS/2)
callbacks = [
# data_source = os.path.dirname(os.path.abspath(__file__))
# data_source = os.path.join(data_source, 'data')
# --- Fragment: MNIST MLP benchmark body (function header missing) ---
# Load the raw MNIST digit images/labels.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Close the profiling phase opened outside this fragment.
timer.end()
#hyperparameters
BATCH_SIZE = param_dict['batch_size']
EPOCHS = param_dict['epochs']
DROPOUT = param_dict['dropout']
# NOTE(review): this variant uses a single ACTIVATION plus two numbered
# ones; confirm which layers each maps to in the missing model code.
ACTIVATION = util.get_activation_instance(param_dict['activation'], param_dict['alpha'])
ACTIVATION1 = util.get_activation_instance(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = util.get_activation_instance(param_dict['activation2'], param_dict['alpha2'])
NHIDDEN = param_dict['nhidden']
NUNITS = param_dict['nunits']
OPTIMIZER = util.get_optimizer_instance(param_dict)
#other
LOSS_FUNCTION = param_dict['loss_function']
METRICS = param_dict['metrics']
# Unlike the reutersmlp fragments, the model path is hard-coded empty here.
model_path = ''
#constants
num_classes = 10
# Early-stop once val_acc plateaus; patience is half the epoch budget.
patience = math.ceil(EPOCHS/2)
callbacks = [
EarlyStopping(monitor="val_acc", min_delta=0.0001, patience=patience, verbose=verbose, mode="auto"),
TerminateOnNaN()]
# --- Fragment: reutersmlp benchmark runner, single-activation variant
# (truncated after the callbacks list; body indentation lost) ---
def run(param_dict=None, verbose=2):
"""Run a param_dict on the reutersmlp benchmark."""
# Read in values from CLI if no param dict was specified and clean up the param dict.
param_dict = util.handle_cli(param_dict, build_parser())
# Display the parsed param dict.
if verbose:
print("PARAM_DICT_CLEAN=")
pprint(param_dict)
# Get values from param_dict.
# Hyperparameters
# NOTE(review): here get_activation_instance receives only the param dict,
# while every sibling fragment passes (name, alpha). One of the two call
# signatures is stale -- verify against util.
ACTIVATION = util.get_activation_instance(param_dict)
BATCH_SIZE = param_dict["batch_size"]
DROPOUT = param_dict["dropout"]
EPOCHS = param_dict["epochs"]
MAX_WORDS = param_dict["max_words"]
NUNITS = param_dict["nunits"]
OPTIMIZER = util.get_optimizer_instance(param_dict)
SKIP_TOP = param_dict["skip_top"]
# Other
model_path = param_dict["model_path"]
# Constants
# Early-stop once val_acc plateaus; patience is half the epoch budget.
patience = math.ceil(EPOCHS/2)
callbacks = [
EarlyStopping(monitor="val_acc", min_delta=0.0001, patience=patience, verbose=verbose, mode="auto"),
TerminateOnNaN()]
# --- Fragment: MNISTCNN benchmark runner, second copy (truncated after
# PADDING_C2; trailing commented-out data_source lines belong to a removed
# local-data code path). Body indentation lost (column 0). ---
def run(param_dict=None, verbose=2):
"""Run a param_dict on the MNISTCNN benchmark."""
# Read in values from CLI if no param_dict was specified and clean up the param dict.
param_dict = util.handle_cli(param_dict, build_parser())
# Display the filled in param dict.
if verbose:
print("PARAM_DICT_CLEAN=")
pprint(param_dict)
# Get values from param_dict.
# Hyperparameters
ACTIVATION1 = util.get_activation_instance(param_dict['activation1'], param_dict['alpha1'])
ACTIVATION2 = util.get_activation_instance(param_dict['activation2'], param_dict['alpha2'])
ACTIVATION3 = util.get_activation_instance(param_dict['activation3'], param_dict['alpha3'])
ACTIVATION4 = util.get_activation_instance(param_dict['activation4'], param_dict['alpha4'])
ACTIVATION5 = util.get_activation_instance(param_dict['activation5'], param_dict['alpha5'])
BATCH_SIZE = param_dict["batch_size"]
DROPOUT = param_dict["dropout"]
EPOCHS = param_dict["epochs"]
F1_SIZE = param_dict["f1_size"]
F2_SIZE = param_dict["f2_size"]
F1_UNITS = param_dict["f1_units"]
F2_UNITS = param_dict["f2_units"]
MAX_POOL = param_dict["max_pool"]
NUNITS = param_dict["nunits"]
OPTIMIZER = util.get_optimizer_instance(param_dict)
PADDING_C1 = param_dict["padding_c1"]
PADDING_C2 = param_dict["padding_c2"]
# else:
# data_source = os.path.dirname(os.path.abspath(__file__))
# data_source = os.path.join(data_source, 'data')
# --- Fragment: IMDB fasttext-style benchmark body (function header missing;
# cut off mid-loop at the end -- the `for input_list` body is absent). ---
# ngram_range == 1 means unigrams only; the n-gram augmentation branch
# below is skipped unless this is raised.
ngram_range = 1
MAX_FEATURES = param_dict['max_features'] # = 20000
MAXLEN = param_dict['maxlen'] # = 400
# NOTE(review): "ENBEDDING" is a typo for "EMBEDDING" -- left as-is here
# because the (missing) model code presumably references this exact name.
ENBEDDING_DIMS = param_dict['embedding_dims'] # = 50
# Load IMDB reviews as integer word-index sequences, vocab capped at
# MAX_FEATURES most-frequent words.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=MAX_FEATURES)
timer.end()
BATCH_SIZE = param_dict['batch_size']
EPOCHS = param_dict['epochs']
# NOTE(review): single-argument call, unlike the (name, alpha) signature
# used elsewhere in this paste -- verify against util.
ACTIVATION = util.get_activation_instance(param_dict)
OPTIMIZER = util.get_optimizer_instance(param_dict)
#constants
# Early-stop once val_acc plateaus; patience is half the epoch budget.
patience = math.ceil(EPOCHS/2)
callbacks = [
EarlyStopping(monitor="val_acc", min_delta=0.0001, patience=patience, verbose=verbose, mode="auto"),
TerminateOnNaN()]
timer.start('preprocessing')
# Optionally augment each sequence with n-gram tokens (Keras fasttext
# example pattern); the loop body collecting n-grams is truncated here.
if ngram_range > 1:
print('Adding {}-gram features'.format(ngram_range))
ngram_set = set()
for input_list in x_train: