How to use the edward2.experimental.rank1_bnns.utils.make_regularizer function in edward2

To help you get started, we've selected a few edward2 examples based on popular ways it is used in public projects.
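Before the full model example below, here is a minimal sketch of calling make_regularizer on its own. The three positional arguments (a regularizer name, a prior mean, and a prior standard deviation) match the calls in the example; the specific name string 'normal_kl_divergence' and the exact returned object are assumptions based on the rank-1 BNN baselines, so check the edward2 source for the accepted values.

from edward2.experimental.rank1_bnns import utils as rank1_utils

# Hypothetical prior hyperparameters for the rank-1 perturbation factors.
prior_mean = 1.0
prior_stddev = 0.1

# Assumed option name: with a Gaussian prior, make_regularizer should return
# a Keras-style regularizer (akin to ed.regularizers.NormalKLDivergence) that
# adds a KL(posterior || Normal(prior_mean, prior_stddev)) penalty to the loss.
alpha_regularizer = rank1_utils.make_regularizer(
    'normal_kl_divergence', prior_mean, prior_stddev)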


Google-Health / records-research / model-uncertainty / bayesian_rnn_model.py (view on GitHub)
    # 1. RNN layer.
    cells = []
    for _ in range(num_rnn_layers):
      # TODO(dusenberrymw): Determine if a grad-clipped version is needed.
      # Each rank-1 cell gets per-factor initializers and, via make_regularizer,
      # KL penalties tying the alpha/gamma factors to a
      # Normal(prior_mean, prior_stddev) prior.
      lstm_cell = rank1_bnn_layers.LSTMCellRank1(
          rnn_dim,
          alpha_initializer=rank1_utils.make_initializer(
              alpha_initializer, random_sign_init, dropout_rate),
          gamma_initializer=rank1_utils.make_initializer(
              gamma_initializer, random_sign_init, dropout_rate),
          recurrent_alpha_initializer=rank1_utils.make_initializer(
              alpha_initializer, random_sign_init, dropout_rate),
          recurrent_gamma_initializer=rank1_utils.make_initializer(
              gamma_initializer, random_sign_init, dropout_rate),
          alpha_regularizer=rank1_utils.make_regularizer(
              alpha_regularizer, prior_mean, prior_stddev),
          gamma_regularizer=rank1_utils.make_regularizer(
              gamma_regularizer, prior_mean, prior_stddev),
          recurrent_alpha_regularizer=rank1_utils.make_regularizer(
              alpha_regularizer, prior_mean, prior_stddev),
          recurrent_gamma_regularizer=rank1_utils.make_regularizer(
              gamma_regularizer, prior_mean, prior_stddev),
          # The shared (deterministic) weights use a standard L2 penalty.
          kernel_regularizer=tf.keras.regularizers.l2(l2),
          recurrent_regularizer=tf.keras.regularizers.l2(l2),
          bias_regularizer=tf.keras.regularizers.l2(l2),
          use_additive_perturbation=use_additive_perturbation,
          ensemble_size=ensemble_size)
      cells.append(lstm_cell)
    self.rnn_layer = tf.keras.layers.RNN(cells, return_sequences=False)

    # 2. Affine layer on combination of RNN output and context features.
    if self.hidden_layer_dim > 0:
      self.hidden_layer = rank1_bnn_layers.DenseRank1(
          self.hidden_layer_dim,
          activation=tf.nn.relu6,
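The pattern above is worth noting: make_regularizer supplies the variational KL penalties for the rank-1 alpha/gamma factors, while the shared LSTM weights get a plain tf.keras.regularizers.l2 penalty. Since both are ordinary Keras regularizers, their contributions accumulate in model.losses, so a training step can combine them with the data term roughly as in this sketch (model, loss_fn, labels, and features are hypothetical stand-ins, not names from the original):

import tensorflow as tf

# Negative log-likelihood on a batch, plus the accumulated regularization
# terms (KL penalties on the rank-1 factors and L2 on the shared weights).
nll = loss_fn(labels, model(features))
regularization = tf.add_n(model.losses)
loss = nll + regularization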