How to use the edward2.experimental.rank1_bnns.utils.make_initializer function in edward2

To help you get started, we’ve selected a few edward2 examples based on popular ways it is used in public projects.
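Every call in the examples below has the same shape: make_initializer(initializer, random_sign_init, dropout_rate) returns an initializer for one rank-1 perturbation factor (alpha or gamma). Here is a minimal standalone sketch; the import follows the module path in the title, while the "trainable_normal" name and the literal values are illustrative assumptions, not taken from the source:

from edward2.experimental.rank1_bnns import utils as rank1_utils

# Build an initializer for a rank-1 alpha factor. The first argument
# selects the initializer (assumed option name shown here), the second
# is the random-sign initialization probability, and the third is a
# dropout rate.
alpha_init = rank1_utils.make_initializer(
    "trainable_normal",  # assumed initializer name, for illustration
    0.75,                # random_sign_init
    1e-3)                # dropout_rate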


github Google-Health / records-research / model-uncertainty / bayesian_rnn_model.py (view on GitHub)

The excerpt below begins inside the model's __init__ method:

    super().__init__()
    self.hidden_layer_dim = hidden_layer_dim

    # 1. RNN layer.
    cells = []
    for _ in range(num_rnn_layers):
      # TODO(dusenberrymw): Determine if a grad-clipped version is needed.
      lstm_cell = rank1_bnn_layers.LSTMCellRank1(
          rnn_dim,
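          # make_initializer builds the initializer for each rank-1 factor
          # (alpha/gamma and their recurrent counterparts) from an initializer
          # spec, a random-sign-init probability, and a dropout rate.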
          alpha_initializer=rank1_utils.make_initializer(
              alpha_initializer, random_sign_init, dropout_rate),
          gamma_initializer=rank1_utils.make_initializer(
              gamma_initializer, random_sign_init, dropout_rate),
          recurrent_alpha_initializer=rank1_utils.make_initializer(
              alpha_initializer, random_sign_init, dropout_rate),
          recurrent_gamma_initializer=rank1_utils.make_initializer(
              gamma_initializer, random_sign_init, dropout_rate),
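          # make_regularizer builds the prior regularizer for each rank-1
          # factor, parameterized by the prior mean and stddev.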
          alpha_regularizer=rank1_utils.make_regularizer(
              alpha_regularizer, prior_mean, prior_stddev),
          gamma_regularizer=rank1_utils.make_regularizer(
              gamma_regularizer, prior_mean, prior_stddev),
          recurrent_alpha_regularizer=rank1_utils.make_regularizer(
              alpha_regularizer, prior_mean, prior_stddev),
          recurrent_gamma_regularizer=rank1_utils.make_regularizer(
              gamma_regularizer, prior_mean, prior_stddev),
          kernel_regularizer=tf.keras.regularizers.l2(l2),
          recurrent_regularizer=tf.keras.regularizers.l2(l2),
          bias_regularizer=tf.keras.regularizers.l2(l2),
          use_additive_perturbation=use_additive_perturbation,
          ensemble_size=ensemble_size)
      cells.append(lstm_cell)
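    # Passing a list of cells to a single tf.keras.layers.RNN stacks them
    # into one multi-layer recurrent layer.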
    self.rnn_layer = tf.keras.layers.RNN(cells, return_sequences=False)

    # 2. Affine layer on combination of RNN output and context features.
    if self.hidden_layer_dim > 0:
      self.hidden_layer = rank1_bnn_layers.DenseRank1(
          self.hidden_layer_dim,
          activation=tf.nn.relu6,
          alpha_initializer=rank1_utils.make_initializer(
              alpha_initializer, random_sign_init, dropout_rate),
          gamma_initializer=rank1_utils.make_initializer(
              gamma_initializer, random_sign_init, dropout_rate),
          kernel_initializer="he_normal",
          alpha_regularizer=rank1_utils.make_regularizer(
              alpha_regularizer, prior_mean, prior_stddev),
          gamma_regularizer=rank1_utils.make_regularizer(
              gamma_regularizer, prior_mean, prior_stddev),
          kernel_regularizer=tf.keras.regularizers.l2(l2),
          bias_regularizer=tf.keras.regularizers.l2(l2),
          use_additive_perturbation=use_additive_perturbation,
          ensemble_size=ensemble_size)

    # 3. Output affine layer.
    self.output_layer = rank1_bnn_layers.DenseRank1(
        output_layer_dim,
        alpha_initializer=rank1_utils.make_initializer(
            alpha_initializer, random_sign_init, dropout_rate),
        # The original excerpt truncates here; the remaining arguments are
        # filled in on the assumption that they mirror the hidden layer above.
        gamma_initializer=rank1_utils.make_initializer(
            gamma_initializer, random_sign_init, dropout_rate),
        kernel_initializer="he_normal",
        alpha_regularizer=rank1_utils.make_regularizer(
            alpha_regularizer, prior_mean, prior_stddev),
        gamma_regularizer=rank1_utils.make_regularizer(
            gamma_regularizer, prior_mean, prior_stddev),
        kernel_regularizer=tf.keras.regularizers.l2(l2),
        bias_regularizer=tf.keras.regularizers.l2(l2),
        use_additive_perturbation=use_additive_perturbation,
        ensemble_size=ensemble_size)
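The pattern across all three layers: each rank-1 layer (LSTMCellRank1, DenseRank1) takes one make_initializer result per perturbation factor and one make_regularizer result per factor, while the shared weights keep ordinary Keras initializers and L2 regularizers. A hypothetical instantiation follows, assuming the surrounding class exposes these constructor arguments; the class name, option strings, and values are illustrative, not from the source:

model = BayesianRNNModel(  # hypothetical class name
    rnn_dim=256,
    num_rnn_layers=1,
    hidden_layer_dim=128,
    output_layer_dim=1,
    alpha_initializer="trainable_normal",      # assumed option name
    gamma_initializer="trainable_normal",      # assumed option name
    alpha_regularizer="normal_kl_divergence",  # assumed option name
    gamma_regularizer="normal_kl_divergence",  # assumed option name
    random_sign_init=0.75,
    dropout_rate=1e-3,
    prior_mean=1.0,
    prior_stddev=0.1,
    l2=1e-4,
    use_additive_perturbation=False,
    ensemble_size=4)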