        dropout_rate = self.dropout_rate or hp.Choice(
            'dropout_rate', [0.0, 0.25, 0.5], default=0)
        # Stack num_layers Dense layers; each is optionally followed by
        # BatchNormalization, then ReLU and Dropout.
        for i in range(num_layers):
            units = hp.Choice(
                'units_{i}'.format(i=i),
                [16, 32, 64, 128, 256, 512, 1024],
                default=32)
            output_node = tf.keras.layers.Dense(units)(output_node)
            if use_batchnorm:
                output_node = tf.keras.layers.BatchNormalization()(output_node)
            output_node = tf.keras.layers.ReLU()(output_node)
            output_node = tf.keras.layers.Dropout(dropout_rate)(output_node)
        return output_node
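
# A hedged sketch of what one trial of the search space above can reduce to
# once every choice is fixed by hand (num_layers=2, units=64, batchnorm on,
# dropout_rate=0.25); the numbers are illustrative, not the tuner's output.
import tensorflow as tf

example_inputs = tf.keras.Input(shape=(20,))
x = example_inputs
for _ in range(2):
    x = tf.keras.layers.Dense(64)(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.Dropout(0.25)(x)
example_model = tf.keras.Model(example_inputs, x)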
class RNNBlock(base.Block):
    """An RNN Block.

    # Arguments
        return_sequences: Boolean. Whether to return the last output in the
            output sequence, or the full sequence. Defaults to False.
        bidirectional: Boolean. Bidirectional RNN. If left unspecified, it
            will be tuned automatically.
        num_layers: Int. The number of layers in RNN. If left unspecified, it
            will be tuned automatically.
        layer_type: String. 'gru' or 'lstm'. If left unspecified, it will be
            tuned automatically.
    """

    def __init__(self,
                 return_sequences=False,
                 bidirectional=None,
                 num_layers=None,
                 layer_type=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.return_sequences = return_sequences
        self.bidirectional = bidirectional
        self.num_layers = num_layers
        self.layer_type = layer_type
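
# A hedged sketch of one configuration the arguments above describe
# (layer_type='lstm', bidirectional=True, num_layers=2,
# return_sequences=False). The block's build method is not shown in this
# snippet, so this only illustrates the documented knobs.
import tensorflow as tf

rnn_inputs = tf.keras.Input(shape=(None, 32))            # (time, features)
h = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(64, return_sequences=True))(rnn_inputs)
h = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(64, return_sequences=False))(h)  # last output only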
    def __init__(self, output_shape=None, **kwargs):
        super().__init__(**kwargs)
        self.output_shape = output_shape

    def build(self, hp, inputs=None):
        """Build the HyperModel instead of Keras Model.

        # Arguments
            hp: HyperParameters. The hyperparameters for building the model.
            inputs: A list of instances of Node.

        # Returns
            A Node instance, the output node of the output Block.
        """
        raise NotImplementedError
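
# A minimal sketch of a concrete Block honoring the contract above: build
# consumes the hyperparameters and the input Nodes, and returns the output
# node. The class name and the Dense layer are hypothetical, purely for
# illustration.
class LiteralDenseBlock(Block):

    def build(self, hp, inputs=None):
        input_node = nest.flatten(inputs)[0]
        units = hp.Choice('units', [32, 64], default=32)
        return tf.keras.layers.Dense(units)(input_node)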
class Preprocessor(Block):
    """Hyper preprocessing block base class.

    It extends Block, which extends HyperModel. A preprocessor is a
    HyperModel, which means it is a search space. However, unlike other
    HyperModels, it is also a model which can be fit.
    """

    def build(self, hp):
        """Get the values of the required HyperParameters.

        It does not build and return a Keras Model, but initializes the
        HyperParameters for the preprocessor to be fit.
        """
        pass

    def update(self, x, y=None):
        """Incrementally fit the preprocessor with a single training instance.

        # Arguments
            x: EagerTensor. A single instance in the training dataset.
            y: EagerTensor. The target of the instance. Defaults to None.
        """
        raise NotImplementedError
        # Tail of the Merge block's build (the class appears later in this
        # file): when element-wise add is not chosen, fall back to
        # concatenation along the last axis.
        return tf.keras.layers.Concatenate()(inputs)
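
# A toy Preprocessor sketch under the contract documented above: build only
# reads the hyperparameters it needs, and update accumulates statistics one
# training instance at a time. The class and its attributes are hypothetical.
class RunningMean(Preprocessor):

    def build(self, hp):
        # Only fetch hyperparameter values; no Keras Model is returned.
        self.scale = hp.Boolean('scale', default=True)

    def update(self, x, y=None):
        self.total = getattr(self, 'total', 0) + x
        self.count = getattr(self, 'count', 0) + 1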
class Flatten(base.Block):
    """Flatten the input tensor with Keras Flatten layer."""

    def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        # Tensors of rank 2 (batch, features) are already flat.
        if len(input_node.shape) > 2:
            return tf.keras.layers.Flatten()(input_node)
        return input_node
class SpatialReduction(base.Block):
    """Reduce the dimension of a spatial tensor, e.g. image, to a vector.

    # Arguments
        reduction_type: String. 'flatten', 'global_max' or 'global_avg'.
            If left unspecified, it will be tuned automatically.
    """

    def __init__(self, reduction_type=None, **kwargs):
        super().__init__(**kwargs)
        self.reduction_type = reduction_type

    def get_config(self):
        config = super().get_config()
        config.update({'reduction_type': self.reduction_type})
        return config
                padding=self._get_padding(kernel_size, output_node),
                activation='relu')(output_node)
            output_node = pool(
                kernel_size - 1,
                padding=self._get_padding(kernel_size - 1,
                                          output_node))(output_node)
        return output_node

    @staticmethod
    def _get_padding(kernel_size, output_node):
        # Use 'valid' padding only while both spatial dimensions are large
        # enough to survive the reduction; otherwise preserve the size
        # with 'same'.
        if (kernel_size * 2 <= output_node.shape[1] and
                kernel_size * 2 <= output_node.shape[2]):
            return 'valid'
        return 'same'
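
# A quick worked example of the padding rule above, using hypothetical NumPy
# arrays as stand-ins for Keras tensors (only .shape is consulted). The
# enclosing class is not shown in this snippet, so the arithmetic is checked
# directly.
import numpy as np

fmap_large = np.zeros((1, 8, 8, 32))   # 8x8 spatial grid
fmap_small = np.zeros((1, 4, 4, 32))   # 4x4 spatial grid
kernel_size = 3
# 3 * 2 <= 8 in both spatial dims -> 'valid' is safe on the large map.
assert kernel_size * 2 <= fmap_large.shape[1]
# 3 * 2 > 4 -> the small map would shrink too far, so 'same' is used.
assert not kernel_size * 2 <= fmap_small.shape[1]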
class ResNetBlock(base.Block, resnet.HyperResNet):
    """Block for ResNet.

    # Arguments
        version: String. 'v1', 'v2' or 'next'. The type of ResNet to use.
            If left unspecified, it will be tuned automatically.
        pooling: String. 'avg' or 'max'. The type of pooling layer to use.
            If left unspecified, it will be tuned automatically.
    """

    def __init__(self,
                 version=None,
                 pooling=None,
                 **kwargs):
        # input_shape is only a placeholder to satisfy HyperResNet's
        # constructor; build replaces it with the real input tensor below.
        super().__init__(include_top=False, input_shape=(10,), **kwargs)
        self.version = version
        self.pooling = pooling

    def build(self, hp, inputs=None):
        self.input_tensor = nest.flatten(inputs)[0]
        self.input_shape = None

        # Register the tunable hyperparameters, then pin any value the user
        # fixed in the constructor.
        hp.Choice('version', ['v1', 'v2', 'next'], default='v2')
        hp.Choice('pooling', ['avg', 'max'], default='avg')

        set_hp_value(hp, 'version', self.version)
        set_hp_value(hp, 'pooling', self.pooling)

        model = super().build(hp)
        return model.outputs
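
# How the pinning works in practice (illustrative): a user-fixed argument
# always overrides the registered Choice, while None leaves it tunable.
fixed_block = ResNetBlock(version='v1')   # 'version' is pinned to 'v1'
tuned_block = ResNetBlock()               # 'version' explored by the tuner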
class XceptionBlock(base.Block, xception.HyperXception):
    """XceptionBlock.

    An Xception structure, used for specifying your model with specific
    datasets. The original Xception architecture is from
    https://arxiv.org/abs/1610.02357. The data first goes through the entry
    flow, then through the middle flow, which is repeated eight times, and
    finally through the exit flow.

    This XceptionBlock returns a similar architecture to Xception, except
    without the last (optional) fully connected layer(s) and logistic
    regression. The size of this architecture can be decided by
    `HyperParameters`, to get an architecture with half, the same, or double
    the size of the original one.

    # Arguments
        activation: String. 'selu' or 'relu'. If left unspecified, it will be
            tuned automatically.
        num_residual_blocks: Int. The number of residual blocks in the middle
            flow. If left unspecified, it will be tuned automatically.
        pooling: String. The type of pooling layer to use. If left
            unspecified, it will be tuned automatically.
    """
    def get_config(self):
        """Get the configuration of the preprocessor.

        # Returns
            A dictionary of configurations of the preprocessor.
        """
        return {'name': self.name}

    def get_state(self):
        return {}

    def set_state(self, state):
        pass
class Head(Block):
    """Base class for the heads, e.g. classification, regression.

    # Arguments
        loss: A Keras loss function. Defaults to None. If None, the loss will
            be inferred from the AutoModel.
        metrics: A list of Keras metrics. Defaults to None. If None, the
            metrics will be inferred from the AutoModel.
        output_shape: Tuple of int(s). Defaults to None. If None, the output
            shape will be inferred from the AutoModel.
    """

    def __init__(self, loss=None, metrics=None, output_shape=None, **kwargs):
        super().__init__(**kwargs)
        self.output_shape = output_shape
        self.loss = loss
        self.metrics = metrics
    def transform(self, y):
        """Transform y into a compatible type (tf.data.Dataset)."""
        if isinstance(y, tf.data.Dataset):
            return y
        if isinstance(y, np.ndarray):
            if len(y.shape) == 1:
                # Expand 1-D targets into a column vector.
                y = y.reshape(-1, 1)
            return tf.data.Dataset.from_tensor_slices(y)
        if isinstance(y, pd.DataFrame):
            return tf.data.Dataset.from_tensor_slices(y.values)
        if isinstance(y, pd.Series):
            return tf.data.Dataset.from_tensor_slices(y.values.reshape(-1, 1))

    def postprocess(self, y):
        """Postprocess the output of the Keras Model."""
        return y
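
# What the conversion above does to a common target type, rerun as a
# self-contained check with concrete values:
import numpy as np
import tensorflow as tf

labels = np.array([0, 1, 1, 0])                    # 1-D labels
ds = tf.data.Dataset.from_tensor_slices(labels.reshape(-1, 1))
print(next(iter(ds)).shape)                        # (1,): one column per instance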
class HyperBlock(Block):
    """HyperBlock uses hyperparameters to decide the inner Block graph.

    A HyperBlock should be built into connected Blocks instead of individual
    Keras layers. The main purpose of creating the HyperBlock class is the
    ease of parsing the graph for preprocessors. The graph would be hard to
    parse if a Block, whose inner structure is decided dynamically by
    hyperparameters, contained both preprocessors and Keras layers.

    When the preprocessing layers of Keras are ready to cover all the
    preprocessors in AutoKeras, the preprocessors should be handled by the
    Keras Model, and the HyperBlock class should be removed. The subclasses
    should then extend the Block class directly, and the build function should
    build connected Keras layers instead of Blocks.

    # Arguments
        output_shape: Tuple of int(s). Defaults to None. If None, the output
            shape will be inferred from the AutoModel.
    """
        reduction_type = self.reduction_type or hp.Choice(
            'reduction_type',
            ['flatten', 'global_max', 'global_avg'],
            default='global_avg')
        if reduction_type == 'flatten':
            output_node = Flatten().build(hp, output_node)
        elif reduction_type == 'global_max':
            output_node = tf.math.reduce_max(output_node, axis=-2)
        elif reduction_type == 'global_avg':
            output_node = tf.math.reduce_mean(output_node, axis=-2)
        elif reduction_type == 'global_min':
            # Not offered in the Choice above, so this branch is only
            # reachable when reduction_type is set explicitly by the caller.
            output_node = tf.math.reduce_min(output_node, axis=-2)
        return output_node
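
# The reductions above collapse the temporal axis (axis=-2) of a
# (batch, time, features) tensor; a small runnable check with example shapes:
import tensorflow as tf

seq = tf.random.normal((8, 20, 64))             # batch=8, time=20, features=64
print(tf.math.reduce_mean(seq, axis=-2).shape)  # (8, 64)  'global_avg'
print(tf.math.reduce_max(seq, axis=-2).shape)   # (8, 64)  'global_max'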
class EmbeddingBlock(base.Block):
    """Word embedding block for sequences.

    The input should be tokenized sequences with the same length, where each
    element of a sequence is the index of a word.

    # Arguments
        max_features: Int. Size of the vocabulary. Must be set if not using
            TextToIntSequence before this block. If not specified, the
            vocabulary size of the preceding TextToIntSequence block will be
            used.
        pretraining: String. 'random' (use random weights instead of any
            pretrained model), 'glove', 'fasttext' or 'word2vec'. Use
            pretrained word embedding. If left unspecified, it will be tuned
            automatically.
        embedding_dim: Int. If left unspecified, it will be tuned
            automatically.
        dropout_rate: Float. The dropout rate applied after the Embedding
            layer. If left unspecified, it will be tuned automatically.
    """
        set_hp_value(hp, 'num_residual_blocks', self.num_residual_blocks)
        set_hp_value(hp, 'pooling', self.pooling)

        model = super().build(hp)
        return model.outputs
def shape_compatible(shape1, shape2):
    if len(shape1) != len(shape2):
        return False
    # TODO: If they can be the same after passing through any layer,
    #  they are compatible. e.g. (32, 32, 3), (16, 16, 2) are compatible
    return shape1[:-1] == shape2[:-1]
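
# The rule above treats shapes as compatible when they agree everywhere
# except the last (channel) dimension; a few illustrative checks:
assert shape_compatible((32, 32, 3), (32, 32, 4))        # only channels differ
assert not shape_compatible((32, 32, 3), (16, 16, 3))    # spatial dims differ
assert not shape_compatible((32, 3), (32, 32, 3))        # different ranks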
class Merge(base.Block):
    """Merge block to merge multiple nodes into one.

    # Arguments
        merge_type: String. 'add' or 'concatenate'. If left unspecified, it
            will be tuned automatically.
    """

    def __init__(self, merge_type=None, **kwargs):
        super().__init__(**kwargs)
        self.merge_type = merge_type

    def get_config(self):
        config = super().get_config()
        config.update({'merge_type': self.merge_type})
        return config
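
# The two merge strategies named above, spelled out with plain Keras layers
# (example widths; 'add' requires identical shapes, 'concatenate' does not):
import tensorflow as tf

left = tf.keras.Input(shape=(32,))
right = tf.keras.Input(shape=(32,))
added = tf.keras.layers.Add()([left, right])                  # (None, 32)
concatenated = tf.keras.layers.Concatenate()([left, right])   # (None, 64)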
import numpy as np
import pandas as pd
import tensorflow as tf
from kerastuner.applications import resnet
from kerastuner.applications import xception
from tensorflow.python.util import nest

from autokeras import utils
from autokeras.hypermodel import base


def set_hp_value(hp, name, value):
    """Override a registered hyperparameter with a user-fixed value.

    When value is None (or otherwise falsy), the value already chosen by the
    tuner is kept.
    """
    full_name = hp._get_name(name)
    hp.values[full_name] = value or hp.values[full_name]
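
# Usage sketch, assuming the kerastuner HyperParameters API of this era:
import kerastuner

hp = kerastuner.HyperParameters()
hp.Choice('version', ['v1', 'v2', 'next'], default='v2')
set_hp_value(hp, 'version', 'v1')   # pins the user-supplied value
print(hp.values['version'])         # 'v1'
set_hp_value(hp, 'version', None)   # None keeps the current value
print(hp.values['version'])         # still 'v1'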
class DenseBlock(base.Block):
    """Block for Dense layers.

    # Arguments
        num_layers: Int. The number of Dense layers in the block.
            If left unspecified, it will be tuned automatically.
        use_batchnorm: Boolean. Whether to use BatchNormalization layers.
            If left unspecified, it will be tuned automatically.
        dropout_rate: Float. The dropout rate for the layers.
            If left unspecified, it will be tuned automatically.
    """

    def __init__(self,
                 num_layers=None,
                 use_batchnorm=None,
                 dropout_rate=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.num_layers = num_layers
        self.use_batchnorm = use_batchnorm
        self.dropout_rate = dropout_rate