How to use the dnn.DNN class in dnn

To help you get started, we've selected a few dnn.DNN examples based on popular ways it is used in public projects. Note that dnn.DNN names a different class in each project below: a Theano feed-forward network in pdnn, a plain feed-forward network in graph-lstm, an EDEncoder in nabu, and a sequence tagger in DeepNLP.


github yajiemiao / pdnn / models / dnn_sat.py
# allocate symbolic variables for the data
        self.x = T.matrix('x')
        self.y = T.ivector('y')
    
        # we assume that i-vectors are appended to speech features in a frame-wise manner  
        self.feat_dim = cfg_si.n_ins
        self.ivec_dim = cfg_adapt.n_ins
        self.iv = self.x[:,self.feat_dim:self.feat_dim+self.ivec_dim]
        self.feat = self.x[:,0:self.feat_dim]
        
        # the parameters 
        self.params = []            # the params to be updated in the current training
        self.delta_params = []
        
        # the i-vector network
        dnn_adapt = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg_adapt, input=self.iv)
        self.dnn_adapt = dnn_adapt

        # the final output layer which has the same dimension as the input features
        linear_func = lambda x: x
        adapt_output_layer = HiddenLayer(rng=numpy_rng,
                                 input=dnn_adapt.layers[-1].output,
                                 n_in=cfg_adapt.hidden_layers_sizes[-1],
                                 n_out=self.feat_dim,
                                 activation=linear_func)
        dnn_adapt.layers.append(adapt_output_layer)
        dnn_adapt.params.extend(adapt_output_layer.params)
        dnn_adapt.delta_params.extend(adapt_output_layer.delta_params)

        dnn_si = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg_si, input=self.feat + dnn_adapt.layers[-1].output)
        self.dnn_si = dnn_si
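
In pdnn, dnn.DNN builds a Theano feed-forward network from a random-number generator, a network configuration, and a symbolic input, and the SAT model above chains two of them. A minimal sketch of that construction pattern, assuming pdnn's utils.network_config.NetworkConfig; the fields set here mirror the attributes the snippet reads, and the sizes are illustrative:

import numpy
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

from models.dnn import DNN
from utils.network_config import NetworkConfig  # assumption: pdnn's config helper

numpy_rng = numpy.random.RandomState(89677)
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

# illustrative dimensions; a real config is normally parsed from the
# pdnn command-line network spec
cfg = NetworkConfig()
cfg.n_ins = 250
cfg.hidden_layers_sizes = [1024, 1024]
cfg.n_outs = 1920

x = T.matrix('x')  # a minibatch of input frames
dnn = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg, input=x)
top_output = dnn.layers[-1].output  # symbolic output of the top hidden layer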
github ML-on-structures / graph-lstm / mlslnn.py
:param seed:
        :type seed:
        :param weight_range:
        :type weight_range:
        :return:
        :rtype:
        """
        self.mlsl_output_size = mlsl.output_sizes[-1] if outputs_from_mlsl else outputs_from_mlsl

        # Change input size of Neural net to assigned feature size plus MLSL outputs
        nnl[0] += self.mlsl_output_size

        self.outputs_from_mlsl = outputs_from_mlsl

        self.mlsl = mlsl
        self.nnet = DNN()
        self.nnet.initialize(nnl=nnl,seed=seed, weight_range=weight_range)
        self.use_softmax = use_softmax
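
In graph-lstm, dnn.DNN is constructed empty and then sized through initialize(), which is why the wrapper above can widen nnl[0] by the MLSL output size before calling it. A minimal standalone sketch of that pattern; the layer widths, seed, and weight range are illustrative:

from dnn import DNN  # graph-lstm's DNN module

# nnl lists the layer widths: input size first, output size last
nnl = [64, 32, 2]

net = DNN()
net.initialize(nnl=nnl, seed=0, weight_range=0.1)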
github vrenkens / nabu / nabu / neuralnetworks / models / ed_encoders / ed_encoder_factory.py
Returns:
        an EDEncoder class'''

    if encoder == 'listener':
        import listener
        return listener.Listener
    elif encoder == 'dummy_encoder':
        import dummy_encoder
        return dummy_encoder.DummyEncoder
    elif encoder == 'dblstm':
        import dblstm
        return dblstm.DBLSTM
    elif encoder == 'dnn':
        import dnn
        return dnn.DNN
    elif encoder == 'hotstart_encoder':
        import hotstart_encoder
        return hotstart_encoder.HotstartEncoder
    else:
        raise Exception('undefined encoder type: %s' % encoder)
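
The factory maps a configuration string to an EDEncoder class, so 'dnn' yields the dnn.DNN class itself rather than an instance. A hedged usage sketch, assuming the function above is exposed as ed_encoder_factory.factory:

from nabu.neuralnetworks.models.ed_encoders import ed_encoder_factory

encoder_class = ed_encoder_factory.factory('dnn')  # the dnn.DNN class, not an instance

# any name not handled above falls through to the else branch
ed_encoder_factory.factory('no_such_encoder')  # raises: undefined encoder type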
github supercoderhawk / DeepNLP / pipeline.py
def get_cws(content, model_name):
  dnn = DNN('mlp', mode=TrainMode.Sentence, task='ner')
  ner = dnn.seg(content, model_path=model_name, ner=True, trans=True)[1]
  return ner
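
Despite its name, get_cws builds the 'mlp' tagger with task='ner' and calls seg() with ner=True, so it returns NER labels rather than segmented words. A hedged usage sketch; the checkpoint path is hypothetical, following the 'tmp/...ckpt' form used in evaluate.py below:

# hypothetical checkpoint produced by training in dnn.py
labels = get_cws('小明来自南京师范大学', 'tmp/mlp-model100.ckpt')
print(labels)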
github supercoderhawk / DeepNLP / evaluate.py
def evaluate_lstm():
  cws = DNN('lstm', is_seg=True)
  model = 'tmp/lstm-model100.ckpt'
  print(cws.seg('小明来自南京师范大学', model, debug=True))
  print(cws.seg('小明是上海理工大学的学生', model))
  print(cws.seg('迈向充满希望的新世纪', model))
  print(cws.seg('我爱北京天安门', model))
  print(cws.seg('多饮多尿多食', model))
  print(cws.seg('无明显小便泡沫增多,伴有夜尿3次。无明显双脚疼痛,无间歇性后跛行,无明显足部红肿破溃', model))
  # evaluate_model(cws, model)
github supercoderhawk / DeepNLP / dnn.py
print(sentence_scores)
      if self.type == 'lstm':
        output = self.sess.run(self.lstm_output, feed_dict={self.input: seq})
        print(output[-1, :, 10])
      print(self.transition_init.eval(session=self.sess))
    current_labels = self.viterbi(sentence_scores, transition, transition_init)
    if not ner:
      return self.tags2words(sentence, current_labels), current_labels
    else:
      # return self.tags2entities(sentence, current_labels), current_labels
      return None, current_labels
      # return self.tags2category_entities(sentence, current_labels), current_labels


if __name__ == '__main__':
  mlp = DNN('mlp', mode=TrainMode.Sentence, task='ner')
  mlp.train_exe()
  mlp_embed = DNN('mlp', mode=TrainMode.Sentence, task='ner',
                  is_embed=True)
  mlp_embed.train_exe()
  lstm = DNN('lstm', task='ner')
  lstm.train_exe()
  lstm_embed = DNN('lstm', task='ner', is_embed=True)
  lstm_embed.train_exe()
github yajiemiao / pdnn / models / dnn_2tower.py
self.delta_params   = []

        self.cfg = cfg
        self.cfg_tower1 = cfg_tower1
        self.cfg_tower2 = cfg_tower2

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # allocate symbolic variables for the data
        self.x = T.matrix('x')
        self.y = T.ivector('y') 
       
        self.input_tower1 = self.x[:,0:cfg_tower1.n_ins]
        self.input_tower2 = self.x[:,cfg_tower1.n_ins:(cfg_tower1.n_ins+cfg_tower2.n_ins)]
 
        self.dnn_tower1 = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=self.cfg_tower1,
                              input=self.input_tower1)
        self.dnn_tower2 = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=self.cfg_tower2,
                              input=self.input_tower2)
        concat_output = T.concatenate([self.dnn_tower1.layers[-1].output, self.dnn_tower2.layers[-1].output], axis=1)
        self.dnn = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=self.cfg, input=concat_output)

        self.layers.extend(self.dnn_tower1.layers)
        self.params.extend(self.dnn_tower1.params)
        self.delta_params.extend(self.dnn_tower1.delta_params)
        self.layers.extend(self.dnn_tower2.layers)
        self.params.extend(self.dnn_tower2.params)
        self.delta_params.extend(self.dnn_tower2.delta_params)
        self.layers.extend(self.dnn.layers)
        self.params.extend(self.dnn.params)
        self.delta_params.extend(self.dnn.delta_params)

        self.finetune_cost = self.dnn.logLayer.negative_log_likelihood(self.y)
        self.errors = self.dnn.logLayer.errors(self.y)
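
Because the two towers slice disjoint column ranges of the same matrix, each input row must hold the tower-1 features followed by the tower-2 features. A minimal NumPy sketch of that layout, with illustrative dimensions:

import numpy

n_ins_tower1, n_ins_tower2 = 250, 100  # illustrative feature dimensions
feats_tower1 = numpy.random.randn(512, n_ins_tower1).astype('float32')
feats_tower2 = numpy.random.randn(512, n_ins_tower2).astype('float32')

# frame-wise concatenation: columns [0, 250) feed tower 1 and
# columns [250, 350) feed tower 2, matching the slicing above
x_value = numpy.hstack([feats_tower1, feats_tower2])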
github supercoderhawk / DeepNLP / pipeline.py
def get_ner(content, model_name):
  if model_name.startswith('tmp/mlp'):
    dnn = DNN('mlp', mode=TrainMode.Sentence, task='ner', is_seg=True)
  else:
    dnn = DNN('lstm', task='ner', is_seg=True)
  ner = dnn.seg(content, model_path=model_name, ner=True, trans=True)
  return ner[1]
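
A hedged usage sketch of get_ner; the checkpoint paths are hypothetical, and the 'tmp/mlp' prefix is what routes a call to the MLP branch above:

print(get_ner('小明来自南京师范大学', 'tmp/mlp-model100.ckpt'))    # MLP branch
print(get_ner('小明是上海理工大学的学生', 'tmp/lstm-model100.ckpt'))  # LSTM branch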