import ast

import tensorflow as tf
from tensorflow.python.util import nest

# utils, encoder, block_module, hyper_block, IdentityLayer and Flatten are
# internal autokeras modules/classes assumed importable alongside these
# excerpts.


def __init__(self, name=None, **kwargs):
    super().__init__(**kwargs)
    if not name:
        # Derive a unique default name such as 'conv_block_1' from the class
        # name and a per-prefix counter.
        prefix = self.__class__.__name__
        name = prefix + '_' + str(tf.keras.backend.get_uid(prefix))
        name = utils.to_snake_case(name)
    self.name = name
    self.inputs = None
    self.outputs = None
    self._num_output_node = 1
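
# The unnamed-instance naming above combines the class name with a per-prefix
# counter from tf.keras.backend.get_uid. A standalone sketch of the same idea;
# to_snake_case here is a local stand-in for the autokeras utils helper, not
# necessarily its exact implementation:
import re

def to_snake_case(name):
    # 'DenseBlock_1' -> 'dense_block_1': underscore before each interior capital.
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

prefix = 'DenseBlock'
auto_name = to_snake_case(prefix + '_' + str(tf.keras.backend.get_uid(prefix)))
# First call yields 'dense_block_1'; get_uid increments the counter per prefix.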

def build(self, hp, inputs=None):
    if self.identity:
        return IdentityLayer(name=self.name)(inputs)
    if self.output_dim and self.output_shape[-1] != self.output_dim:
        raise ValueError(
            'The data doesn\'t match the output_dim. '
            'Expecting {} but got {}'.format(self.output_dim,
                                             self.output_shape[-1]))
    inputs = nest.flatten(inputs)
    utils.validate_num_inputs(inputs, 1)
    input_node = inputs[0]
    output_node = input_node
    dropout_rate = self.dropout_rate or hp.Choice('dropout_rate',
                                                  [0.0, 0.25, 0.5],
                                                  default=0)
    if dropout_rate > 0:
        output_node = tf.keras.layers.Dropout(dropout_rate)(output_node)
    output_node = block_module.Flatten().build(hp, output_node)
    output_node = tf.keras.layers.Dense(self.output_shape[-1],
                                        name=self.name)(output_node)
    return output_node
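
# How the `self.dropout_rate or hp.Choice(...)` pattern above resolves: hp is a
# KerasTuner HyperParameters object; outside an active search, Choice registers
# the hyperparameter and returns its default. Sketch (assumes the keras_tuner
# package name; older releases import as kerastuner):
import keras_tuner as kt

hp = kt.HyperParameters()
rate = hp.Choice('dropout_rate', [0.0, 0.25, 0.5], default=0.0)
# rate == 0.0 here; a tuner substitutes sampled values on later trials.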

def build(self, hp, inputs=None):
    inputs = nest.flatten(inputs)
    utils.validate_num_inputs(inputs, 1)
    input_node = inputs[0]
    output_node = input_node
    kernel_size = self.kernel_size or hp.Choice('kernel_size',
                                                [3, 5, 7],
                                                default=3)
    num_blocks = self.num_blocks or hp.Choice('num_blocks',
                                              [1, 2, 3],
                                              default=2)
    separable = self.separable
    if separable is None:
        separable = hp.Choice('separable', [True, False], default=False)
    # Pick conv/pool layer classes matching the input rank (1D/2D/3D).
    if separable:
        conv = utils.get_sep_conv(input_node.shape)
    else:
        conv = utils.get_conv(input_node.shape)
    pool = utils.get_max_pooling(input_node.shape)
    for i in range(num_blocks):
        output_node = conv(
            hp.Choice('filters_{i}_1'.format(i=i),
                      [16, 32, 64],
                      default=32),
            kernel_size,
            padding=self._get_padding(kernel_size, output_node),
            activation='relu')(output_node)
        output_node = conv(
            hp.Choice('filters_{i}_2'.format(i=i),
                      [16, 32, 64],
                      default=32),
            kernel_size,
            padding=self._get_padding(kernel_size, output_node),
            activation='relu')(output_node)
        # Down-sample between blocks; this is what the pool class selected
        # above is for.
        output_node = pool(
            kernel_size - 1,
            padding=self._get_padding(kernel_size - 1, output_node))(output_node)
    return output_node
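
# _get_padding is not shown in this excerpt. A plausible sketch of such a
# helper (an assumption, written as a free function rather than the method
# used above): keep 'valid' padding only while every spatial dimension is
# large enough, otherwise switch to 'same' so repeated convs/pools cannot
# shrink a dimension below 1.
def get_padding(kernel_size, output_node):
    # output_node.shape is (batch, spatial..., channels); check spatial dims.
    if all(kernel_size * 2 <= length for length in output_node.shape[1:-1]):
        return 'valid'
    return 'same'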

def build(self, hp, inputs=None):
    inputs = nest.flatten(inputs)
    utils.validate_num_inputs(inputs, 1)
    input_node = inputs[0]
    output_node = input_node
    output_node = hyper_block.Flatten().build(hp, output_node)
    output_node = tf.keras.layers.Dense(self.output_shape[-1])(output_node)
    if self.binary:
        output_node = tf.keras.activations.sigmoid(output_node)
    else:
        output_node = tf.keras.layers.Softmax()(output_node)
    return output_node
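
# The branch above follows the usual output conventions: sigmoid for binary
# targets (independent probabilities), softmax for mutually exclusive classes
# (rows sum to 1). Quick standalone check:
logits = tf.constant([[1.2, -0.3, 0.5]])
print(tf.keras.activations.sigmoid(tf.constant([[0.7]])).numpy())  # ~[[0.668]]
print(tf.keras.layers.Softmax()(logits).numpy().sum())             # ~1.0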

def build(self, hp, inputs=None):
    inputs = nest.flatten(inputs)
    utils.validate_num_inputs(inputs, 1)
    input_node = inputs[0]
    output_node = input_node
    # No need to reduce.
    if len(output_node.shape) <= 2:
        return output_node
    reduction_type = self.reduction_type or hp.Choice('reduction_type',
                                                      ['flatten',
                                                       'global_max',
                                                       'global_avg'],
                                                      default='global_avg')
    if reduction_type == 'flatten':
        output_node = Flatten().build(hp, output_node)
    elif reduction_type == 'global_max':
        output_node = utils.get_global_max_pooling(
            output_node.shape)()(output_node)
    elif reduction_type == 'global_avg':
        output_node = utils.get_global_average_pooling(
            output_node.shape)()(output_node)
    return output_node
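
# utils.get_global_max_pooling / get_global_average_pooling above return a
# layer *class* chosen by tensor rank (note the extra () before the call on
# output_node). A sketch of that dispatch pattern, assuming rank 3/4/5 inputs
# map to 1D/2D/3D pooling:
def get_global_average_pooling(shape):
    return [tf.keras.layers.GlobalAveragePooling1D,
            tf.keras.layers.GlobalAveragePooling2D,
            tf.keras.layers.GlobalAveragePooling3D][len(shape) - 3]

# A (batch, height, width, channels) tensor selects GlobalAveragePooling2D.
assert get_global_average_pooling((None, 32, 32, 3)) is tf.keras.layers.GlobalAveragePooling2D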

def build(self, hp, inputs=None):
    inputs = nest.flatten(inputs)
    utils.validate_num_inputs(inputs, 1)
    input_node = inputs[0]
    if len(input_node.shape) > 2:
        return tf.keras.layers.Flatten()(input_node)
    return input_node
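
# Behavior of the Flatten block above: rank <= 2 tensors pass through
# untouched; anything higher-rank is flattened. Quick check with Keras inputs:
flat = tf.keras.layers.Flatten()(tf.keras.Input(shape=(8, 8, 3)))
print(flat.shape)  # (None, 192)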

def _record_dataset_shape(self, dataset):
    self.output_shape = utils.dataset_shape(dataset)


# Variant that records the shape to self.shape instead of self.output_shape:
def _record_dataset_shape(self, dataset):
    self.shape = utils.dataset_shape(dataset)
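
# utils.dataset_shape is not shown here; one plausible implementation (an
# assumption) reads the shape off the tf.data dataset's element_spec:
def dataset_shape(dataset):
    return tf.nest.flatten(dataset.element_spec)[0].shape

ds = tf.data.Dataset.from_tensor_slices(tf.zeros((100, 32)))
print(dataset_shape(ds))  # (32,)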

def set_state(self, state):
    super().set_state(state)
    for key, label_encoder in state['label_encoders'].items():
        self.label_encoders[key] = encoder.deserialize(label_encoder)
    for key, label_encoder_state in state['label_encoders_state'].items():
        self.label_encoders[key].set_state(label_encoder_state)
    self.column_names = state['column_names']
    self.column_types = state['column_types']
    self.num_columns = state['num_columns']
    self.shape = state['shape']
    self.num_rows = state['num_rows']
    self.categorical_col = state['categorical_col']
    self.numerical_col = state['numerical_col']
    self.value_counters = utils.to_type_key(state['value_counters'], int)
    self.categorical_categorical = utils.to_type_key(
        state['categorical_categorical'], ast.literal_eval)
    self.numerical_categorical = utils.to_type_key(
        state['numerical_categorical'], ast.literal_eval)
    self.count_frequency = utils.to_type_key(state['count_frequency'], int)
    self.high_level1_col = state['high_level1_col']
    self.high_level2_col = state['high_level2_col']
    self.high_level_cat_cat = utils.to_type_key(
        state['high_level_cat_cat'], ast.literal_eval)
    self.high_level_num_cat = utils.to_type_key(
        state['high_level_num_cat'], ast.literal_eval)
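
# set_state above restores what a matching get_state snapshot would capture.
# A hedged sketch of that inverse (key names taken from set_state; an
# encoder.serialize counterpart to encoder.deserialize is assumed, and the
# key-type conversions that set_state undoes via utils.to_type_key are
# omitted):
def get_state(self):
    state = super().get_state()
    state.update({
        'label_encoders': {key: encoder.serialize(le)
                           for key, le in self.label_encoders.items()},
        'label_encoders_state': {key: le.get_state()
                                 for key, le in self.label_encoders.items()},
        'column_names': self.column_names,
        'column_types': self.column_types,
        'num_columns': self.num_columns,
        'shape': self.shape,
        'num_rows': self.num_rows,
        'categorical_col': self.categorical_col,
        'numerical_col': self.numerical_col,
        'value_counters': self.value_counters,
        'categorical_categorical': self.categorical_categorical,
        'numerical_categorical': self.numerical_categorical,
        'count_frequency': self.count_frequency,
        'high_level1_col': self.high_level1_col,
        'high_level2_col': self.high_level2_col,
        'high_level_cat_cat': self.high_level_cat_cat,
        'high_level_num_cat': self.high_level_num_cat,
    })
    return state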