How to use the mxnet.sym.FullyConnected function in mxnet

To help you get started, we've selected a few mxnet.sym.FullyConnected examples, based on popular ways the function is used in public projects. FullyConnected is mxnet's dense layer: by default it flattens its input to 2-D and computes output = dot(input, weight.T) + bias, producing num_hidden values per example.

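A minimal sketch of the basic call before diving into the project snippets; the 'data'/'fc1' names and the shapes are illustrative, not taken from any of the repos below:

import mxnet as mx

# a placeholder input and a dense layer with 64 output units
data = mx.sym.Variable('data')
fc1 = mx.sym.FullyConnected(data=data, num_hidden=64, name='fc1')

# the weight and bias arguments are created implicitly from the layer name
print(fc1.list_arguments())                # ['data', 'fc1_weight', 'fc1_bias']
# a (32, 128) input yields a (32, 64) output
print(fc1.infer_shape(data=(32, 128))[1])  # [(32, 64)]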

github eldercrow / additions_mxnet / rcnn / rcnn / symbol / symbol_pva100_mpii.py
    # classification
    cls_score = mx.sym.FullyConnected(fc7_relu, name='cls_score', num_hidden=num_classes)
    cls_prob = mx.sym.SoftmaxOutput(data=cls_score, label=label, name='cls_prob',
            use_ignore=True, ignore_label=-1, normalization='batch')
    # bounding box regression
    bbox_pred = mx.sym.FullyConnected(fc7_relu, name='bbox_pred', num_hidden=num_classes*4)
    bbox_loss_ = bbox_weight * \
            mx.sym.smooth_l1(bbox_pred - bbox_target, name='bbox_loss_', scalar=1.0)
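    # grad_scale averages the smooth-L1 gradient over the ROIs sampled in each batch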
    bbox_loss = mx.sym.MakeLoss(bbox_loss_, name='bbox_loss', grad_scale=1.0 / config.TRAIN.BATCH_ROIS)

    # head classification
    head_score = mx.sym.FullyConnected(fc7_relu, name='head_score', num_hidden=num_grid)
    head_prob = mx.sym.SoftmaxOutput(head_score, label=head_gid, name='head_prob', normalization='batch',
            use_ignore=True, ignore_label=-1)
    head_bbox_pred = mx.sym.FullyConnected(fc7_relu, name='head_pred', num_hidden=num_grid*4)
    head_bbox_loss_ = head_weight * \
            mx.sym.smooth_l1(head_bbox_pred - head_target, name='head_bbox_loss_', scalar=1.0)
    head_bbox_loss = mx.sym.MakeLoss(head_bbox_loss_, name='head_bbox_loss', grad_scale=0.1 / config.TRAIN.BATCH_ROIS)

    # joint classification
    joint_score = mx.sym.FullyConnected(fc7_relu, name='joint_score', num_hidden=num_grid*4)
    joint_probs = []
    joint_gids = []
    for i in range(4):
        sidx = i * num_grid
        eidx = (i+1) * num_grid
        scorei = mx.sym.slice_axis(joint_score, axis=1, begin=sidx, end=eidx)
        labeli = mx.sym.slice_axis(joint_gid, axis=1, begin=i, end=i+1)
        labeli = mx.sym.reshape(labeli, (-1,))
        joint_gids.append(labeli)
        joint_probs.append(mx.sym.SoftmaxOutput( \

github hpi-xnor / BMXNet-v2 / example / ctc / lstm.py
def _lstm(num_hidden, indata, prev_state, param, seqidx, layeridx):
    """LSTM Cell symbol"""
    i2h = mx.sym.FullyConnected(data=indata,
                                weight=param.i2h_weight,
                                bias=param.i2h_bias,
                                num_hidden=num_hidden * 4,
                                name="t%d_l%d_i2h" % (seqidx, layeridx))
    h2h = mx.sym.FullyConnected(data=prev_state.h,
                                weight=param.h2h_weight,
                                bias=param.h2h_bias,
                                num_hidden=num_hidden * 4,
                                name="t%d_l%d_h2h" % (seqidx, layeridx))
    gates = i2h + h2h
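    # i2h + h2h packs all four gate pre-activations; split into in/transform/forget/out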
    slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
                                      name="t%d_l%d_slice" % (seqidx, layeridx))
    in_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid")
    in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")
    forget_gate = mx.sym.Activation(slice_gates[2], act_type="sigmoid")
    out_gate = mx.sym.Activation(slice_gates[3], act_type="sigmoid")
    next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)
    next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh")
    return LSTMState(c=next_c, h=next_h)

github luoyetx / mx-lsoftmax / mnist.py
    pool2 = mx.sym.Pooling(data=relu2, kernel=(2, 2), stride=(2, 2), pool_type='max')
    fc3 = mx.sym.FullyConnected(data=pool2, num_hidden=256)
    relu3 = mx.sym.Activation(data=fc3, act_type='relu')

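    # a 2-unit embedding so the learned features can be plotted in the plane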
    embedding = mx.sym.FullyConnected(data=relu3, num_hidden=2, name='embedding')
    if not args.no_lsoftmax:
        if args.op_impl == 'cpp':
            fc4 = mx.sym.LSoftmax(data=embedding, label=label, num_hidden=10,
                                  beta=args.beta, margin=args.margin, scale=args.scale,
                                  beta_min=args.beta_min, verbose=True)
        else:
            fc4 = mx.sym.Custom(data=embedding, label=label, num_hidden=10,
                                beta=args.beta, margin=args.margin, scale=args.scale,
                                beta_min=args.beta_min, op_type='LSoftmax')
    else:
        fc4 = mx.sym.FullyConnected(data=embedding, num_hidden=10, no_bias=True)
    softmax_loss = mx.sym.SoftmaxOutput(data=fc4, label=label)
    return softmax_loss

github geek-ai / MAgent / python / magent / builtin / mx_model / dqn.py
            h_conv1 = mx.sym.Convolution(data=input_view, kernel=(3, 3),
                                         num_filter=kernel_num[0], layout="NCHW")
            h_conv1 = mx.sym.Activation(data=h_conv1, act_type="relu")
            h_conv2 = mx.sym.Convolution(data=h_conv1, kernel=(3, 3),
                                         num_filter=kernel_num[1], layout="NCHW")
            h_conv2 = mx.sym.Activation(data=h_conv2, act_type="relu")
        else:
            input_view = mx.sym.flatten(data=input_view)
            h_conv2 = mx.sym.FullyConnected(input_view, num_hidden=hidden_size[0])
            h_conv2 = mx.sym.Activation(data=h_conv2, act_type="relu")

        flatten_view = mx.sym.flatten(data=h_conv2)
        h_view = mx.sym.FullyConnected(data=flatten_view, num_hidden=hidden_size[0])
        h_view = mx.sym.Activation(data=h_view, act_type="relu")

        h_emb = mx.sym.FullyConnected(data=input_feature, num_hidden=hidden_size[0])
        h_emb = mx.sym.Activation(data=h_emb, act_type="relu")

        dense = mx.sym.concat(h_view, h_emb)

        if self.use_dueling:
            # state value
            value = mx.sym.FullyConnected(data=dense, num_hidden=1)
            advantage = mx.sym.FullyConnected(data=dense, num_hidden=self.num_actions)

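            # dueling: recombine as Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)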
            mean = mx.sym.mean(advantage, axis=1, keepdims=True)
            advantage = mx.sym.broadcast_sub(advantage, mean)
            qvalues = mx.sym.broadcast_add(advantage, value)
        else:
            qvalues = mx.sym.FullyConnected(data=dense, num_hidden=self.num_actions)

        return qvalues

github hpi-xnor / BMXNet-v2 / example / speech-demo / lstm_proj.py
def lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0., num_hidden_proj=0):
    """LSTM Cell symbol"""
    if dropout > 0.:
        indata = mx.sym.Dropout(data=indata, p=dropout)

    i2h = mx.sym.FullyConnected(data=indata,
                                weight=param.i2h_weight,
                                bias=param.i2h_bias,
                                num_hidden=num_hidden * 4,
                                name="t%d_l%d_i2h" % (seqidx, layeridx))
    h2h = mx.sym.FullyConnected(data=prev_state.h,
                                weight=param.h2h_weight,
                                #bias=param.h2h_bias,
                                no_bias=True,
                                num_hidden=num_hidden * 4,
                                name="t%d_l%d_h2h" % (seqidx, layeridx))
    gates = i2h + h2h
    slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
                                      name="t%d_l%d_slice" % (seqidx, layeridx))

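    # peephole connections: the previous cell state modulates the input and forget gates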
    Wcidc = mx.sym.broadcast_mul(param.c2i_bias,  prev_state.c) + slice_gates[0]
    in_gate = mx.sym.Activation(Wcidc, act_type="sigmoid")
    in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")

    Wcfdc = mx.sym.broadcast_mul(param.c2f_bias, prev_state.c) + slice_gates[2]
    forget_gate = mx.sym.Activation(Wcfdc, act_type="sigmoid")
    next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)

github deepinsight / insightface / recognition / train.py
def get_symbol(args):
  embedding = eval(config.net_name).get_symbol()
  all_label = mx.symbol.Variable('softmax_label')
  gt_label = all_label
  is_softmax = True
  if config.loss_name=='softmax': #softmax
    _weight = mx.symbol.Variable("fc7_weight", shape=(config.num_classes, config.emb_size), 
        lr_mult=config.fc7_lr_mult, wd_mult=config.fc7_wd_mult, init=mx.init.Normal(0.01))
    if config.fc7_no_bias:
      fc7 = mx.sym.FullyConnected(data=embedding, weight = _weight, no_bias = True, num_hidden=config.num_classes, name='fc7')
    else:
      _bias = mx.symbol.Variable('fc7_bias', lr_mult=2.0, wd_mult=0.0)
      fc7 = mx.sym.FullyConnected(data=embedding, weight = _weight, bias = _bias, num_hidden=config.num_classes, name='fc7')
  elif config.loss_name=='margin_softmax':
    _weight = mx.symbol.Variable("fc7_weight", shape=(config.num_classes, config.emb_size), 
        lr_mult=config.fc7_lr_mult, wd_mult=config.fc7_wd_mult, init=mx.init.Normal(0.01))
    s = config.loss_s
    _weight = mx.symbol.L2Normalization(_weight, mode='instance')
    nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')*s
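    # with weights and embeddings both L2-normalized, each fc7 logit equals s * cos(theta)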
    fc7 = mx.sym.FullyConnected(data=nembedding, weight = _weight, no_bias = True, num_hidden=config.num_classes, name='fc7')
    if config.loss_m1!=1.0 or config.loss_m2!=0.0 or config.loss_m3!=0.0:
      if config.loss_m1==1.0 and config.loss_m2==0.0:
        s_m = s*config.loss_m3
        gt_one_hot = mx.sym.one_hot(gt_label, depth = config.num_classes, on_value = s_m, off_value = 0.0)
        fc7 = fc7-gt_one_hot
      else:

github deepinsight / insightface / src / train.py
      if m>0.0:
        if args.margin_verbose>0:
          zy = mx.sym.pick(fc7, gt_label, axis=1)
          cos_t = zy/s
          margin_symbols.append(mx.symbol.mean(cos_t))

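        # additive (CosFace-style) margin: subtract s*m from the ground-truth logit only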
        s_m = s*m
        gt_one_hot = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = s_m, off_value = 0.0)
        fc7 = fc7-gt_one_hot

        if args.margin_verbose>0:
          new_zy = mx.sym.pick(fc7, gt_label, axis=1)
          new_cos_t = new_zy/s
          margin_symbols.append(mx.symbol.mean(new_cos_t))
    else:
      fc7 = mx.sym.FullyConnected(data=embedding, weight = _weight, no_bias = True, num_hidden=args.num_classes, name='fc7')
      if m>0.0:
        body = embedding*embedding
        body = mx.sym.sum_axis(body, axis=1, keepdims=True)
        body = mx.sym.sqrt(body)
        body = body*m
        gt_one_hot = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = 1.0, off_value = 0.0)
        body = mx.sym.broadcast_mul(gt_one_hot, body)
        fc7 = fc7-body

  elif args.loss_type==3:
    s = args.margin_s
    m = args.margin_m
    assert args.margin==2 or args.margin==4
    _weight = mx.symbol.Variable("fc7_weight", shape=(args.num_classes, args.emb_size), lr_mult=1.0)
    _weight = mx.symbol.L2Normalization(_weight, mode='instance')
    nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')*s

github peisuke / DeepLearningSpeedComparison / mxnet / vgg16 / predict.py
    h = mx.sym.Convolution(h, kernel=(3, 3), pad=(1, 1), num_filter=512, name = "conv5-1")
    h = mx.sym.Activation(h, name='relu5-1', act_type="relu")
    h = mx.sym.Convolution(h, kernel=(3, 3), pad=(1, 1), num_filter=512, name = "conv5-2")
    h = mx.sym.Activation(h, name='relu5-2', act_type="relu")
    h = mx.sym.Convolution(h, kernel=(3, 3), pad=(1, 1), num_filter=512, name = "conv5-3")
    h = mx.sym.Activation(h, name='relu5-3', act_type="relu")
    h = mx.sym.Pooling(h, pool_type="max", kernel=(2, 2), stride=(2,2), name="pool5")
    
    h = mx.sym.Flatten(h)
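    # FullyConnected flattens its input by default, but the explicit Flatten keeps the shape change obvious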
    
    h = mx.sym.FullyConnected(h, name='fc6', num_hidden = 4096)
    h = mx.sym.Activation(h, name='relu6', act_type="relu")
    h = mx.sym.FullyConnected(h, name='fc7', num_hidden = 4096)
    h = mx.sym.Activation(h, name='relu7', act_type="relu")
    h = mx.sym.FullyConnected(h, name='fc8', num_hidden=1000)
    
    return mx.sym.softmax(h)

github samsungsds-rnd / deepspeech.mxnet / stt_layer_lstm.py
def vanilla_lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, is_batchnorm=False, gamma=None, beta=None, name=None):
    """LSTM Cell symbol"""
    i2h = mx.sym.FullyConnected(data=indata,
                                weight=param.i2h_weight,
                                bias=param.i2h_bias,
                                num_hidden=num_hidden * 4,
                                name="t%d_l%d_i2h" % (seqidx, layeridx))
    if is_batchnorm:
        if name is not None:
            i2h = batchnorm(net=i2h, gamma=gamma, beta=beta, name="%s_batchnorm" % name)
        else:
            i2h = batchnorm(net=i2h, gamma=gamma, beta=beta)
    h2h = mx.sym.FullyConnected(data=prev_state.h,
                                weight=param.h2h_weight,
                                bias=param.h2h_bias,
                                num_hidden=num_hidden * 4,
                                name="t%d_l%d_h2h" % (seqidx, layeridx))
    gates = i2h + h2h
    slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,

github edmBernard / mxnet_example_shared_weight / demo_shared_partial_with_super_symbol_v1.py
def get_module2_symbols(data):
    # Shared
    fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=128)
    act1 = mx.sym.Activation(data=fc1, name='act1', act_type="relu")
    fc2 = mx.sym.FullyConnected(data=act1, name='fc2', num_hidden=64)
    act2 = mx.sym.Activation(data=fc2, name='act2', act_type="relu")

    # Module 2
    fc3_2 = mx.sym.FullyConnected(data=act2, name='fc3_2', num_hidden=10)
    mlps2 = mx.sym.SoftmaxOutput(data=fc3_2, name='softmax')
    
    return mlps2
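Because mxnet ties parameters to symbol names, the 'fc1' and 'fc2' layers above are shared with any sibling module that declares layers under the same names. A minimal, standalone sketch of that naming behaviour (not code from the repo):

import mxnet as mx

def shared_trunk(data):
    # reuse the names 'fc1'/'fc2' so every caller resolves to the same parameters
    fc1 = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=128)
    act1 = mx.sym.Activation(data=fc1, name='act1', act_type='relu')
    return mx.sym.FullyConnected(data=act1, name='fc2', num_hidden=64)

a = shared_trunk(mx.sym.Variable('data'))
b = shared_trunk(mx.sym.Variable('data'))
# identical argument names are what parameter sharing between modules keys on
assert a.list_arguments() == b.list_arguments()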