How to use the mxnet.symbol function in mxnet

To help you get started, we've selected a few mxnet examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github hpi-xnor / BMXNet-v2 / tools / accnn / utils.py View on Github external
def sym_factory(node, data):
  """Instantiate an mxnet operator from a serialized graph-node description.

  Parameters
  ----------
  node : dict
      Node description with keys ``'name'`` and ``'op'``, and optionally
      ``'param'`` — a mapping of attribute name to string value.
  data : mx.symbol.Symbol
      Input symbol fed to the created operator.

  Returns
  -------
  mx.symbol.Symbol
      Result of calling ``mx.symbol.<op>(data=..., name=..., **params)``.
  """
  name = node['name']
  params = {}
  if 'param' in node:
    for k, v in node['param'].items():
      try:
        params[k] = ast.literal_eval(v)
      # literal_eval raises SyntaxError (not only ValueError) on strings
      # that are not valid Python literals, e.g. "relu"; in either case
      # keep the raw string as the attribute value.
      except (ValueError, SyntaxError):
        params[k] = v
  return getattr(mx.symbol, node['op'])(data=data, name=name, **params)
github bruinxiong / SENet.mxnet / symbol_se_inception_resnet_v2.py View on Github external
def squeeze_excitation_block(name, data, num_filter, ratio):
    """Squeeze-and-Excitation block.

    Globally average-pools ``data`` ("squeeze"), runs the pooled vector
    through a bottleneck FC pair with relu/sigmoid ("excitation"), and
    rescales the input feature map channel-wise with the resulting gate.
    """
    # Squeeze: collapse each channel's spatial map to a single value.
    pooled = mx.sym.Pooling(data=data, global_pool=True, kernel=(7, 7),
                            pool_type='avg', name=name + '_squeeze')
    flat = mx.symbol.Flatten(data=pooled, name=name + '_flatten')
    # Excitation: reduce to num_filter*ratio units, then expand back.
    gate = mx.symbol.FullyConnected(data=flat,
                                    num_hidden=int(num_filter * ratio),
                                    name=name + '_excitation1')
    gate = mx.sym.Activation(data=gate, act_type='relu',
                             name=name + '_excitation1_relu')
    gate = mx.symbol.FullyConnected(data=gate, num_hidden=num_filter,
                                    name=name + '_excitation2')
    gate = mx.sym.Activation(data=gate, act_type='sigmoid',
                             name=name + '_excitation2_sigmoid')
    # Reshape the gate to (N, C, 1, 1) so it broadcasts over H and W.
    gate = mx.symbol.reshape(data=gate, shape=(-1, num_filter, 1, 1))
    return mx.symbol.broadcast_mul(data, gate)
github deepinx / deep-face-alignment / symbol / sym_heatmap.py View on Github external
def ConvFactory(data, num_filter, kernel, stride=(1, 1), pad=(0, 0), act_type="relu", mirror_attr=None, with_act=True, dcn=False, name=''):
    """Convolution -> BatchNorm -> (optional) activation building block.

    Parameters
    ----------
    data : mx.symbol.Symbol
        Input feature map.
    num_filter : int
        Number of output channels.
    kernel, stride, pad : tuple of int
        Convolution geometry.
    act_type : str
        Activation type forwarded to ``Act`` when ``with_act`` is True.
    mirror_attr : dict or None
        Kept for interface compatibility; ``None`` is treated as an empty
        dict.  Not used by the live code path.
    with_act : bool
        If True, apply the activation after batch norm; otherwise return
        the batch-norm output directly.
    dcn : bool
        If True, use a deformable convolution with a learned offset branch
        instead of a plain convolution.
    name : str
        Prefix for the created layer names.

    Notes
    -----
    Relies on module-level ``workspace``, ``bn_mom`` and ``Act`` defined
    elsewhere in this file.
    """
    # NOTE(review): the original default was a mutable dict ({}), which is
    # shared across all calls in Python; use None as the sentinel instead.
    if mirror_attr is None:
        mirror_attr = {}
    if not dcn:
      conv = mx.symbol.Convolution(
          data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, workspace=workspace, name=name+'_conv')
    else:
        # Deformable conv: a 3x3 conv predicts 18 sampling offsets
        # (2 coords * 3*3 kernel) consumed alongside the input.
        conv_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = data,
                num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
        conv = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=data, offset=conv_offset,
                num_filter=num_filter, pad=(1,1), kernel=(3,3), num_deformable_group=1, stride=stride, dilate=(1, 1), no_bias=False)
    bn = mx.symbol.BatchNorm(data=conv, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name+'_bn')
    if with_act:
      return Act(bn, act_type, name=name+'_relu')
    else:
      return bn
github GT-RAIL / rail_object_detection / rail_object_detector / libs / drfcn / symbols / resnet_v1_101_rfcn_dcn.py View on Github external
def get_resnet_v1_conv4(self, data):
        conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2),
                                      no_bias=True)
        bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=self.eps)
        scale_conv1 = bn_conv1
        conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
        pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3),
                                  stride=(2, 2), pool_type='max')
        res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1),
                                              stride=(1, 1), no_bias=True)
        bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
        scale2a_branch1 = bn2a_branch1
        res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1),
                                               stride=(1, 1), no_bias=True)
        bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True,
                                            fix_gamma=False, eps=self.eps)
        scale2a_branch2a = bn2a_branch2a
        res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
        res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1),
                                               kernel=(3, 3), stride=(1, 1), no_bias=True)
        bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True,
                                            fix_gamma=False, eps=self.eps)
        scale2a_branch2b = bn2a_branch2b
github pengwangucla / DeLS-3D / networks / util_layers.py View on Github external
kernel=(64,64),
                stride=(32,32),
                numclass=21,
                name='score',
                workspace_default=WORK_SPACE):
    # score out
    bigscore = mx.symbol.Deconvolution(data=input,
            kernel=kernel,
            stride=stride,
            adj=(stride[0]-1, stride[1]-1),
            num_filter=numclass,
            workspace=workspace_default,
            name=name)

    upscore = mx.symbol.Crop(*[bigscore, crop], offset=offset, name="upscore")
    softmax = mx.symbol.SoftmaxOutput(data=upscore,
            multi_output=True, use_ignore=True, ignore_label=255, name="softmax")
    return softmax
github bleakie / MaskInsightface / src / train.py View on Github external
extra_loss = mx.symbol.MakeLoss(triplet_loss)
  elif args.loss_type==13: #triplet loss with angular margin
    m = args.margin_m
    sin_m = math.sin(m)
    cos_m = math.cos(m)
    nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n')
    anchor = mx.symbol.slice_axis(nembedding, axis=0, begin=0, end=args.per_batch_size//3)
    positive = mx.symbol.slice_axis(nembedding, axis=0, begin=args.per_batch_size//3, end=2*args.per_batch_size//3)
    negative = mx.symbol.slice_axis(nembedding, axis=0, begin=2*args.per_batch_size//3, end=args.per_batch_size)
    ap = anchor * positive
    an = anchor * negative
    ap = mx.symbol.sum(ap, axis=1, keepdims=1) #(T,1)
    an = mx.symbol.sum(an, axis=1, keepdims=1) #(T,1)

    ap = mx.symbol.arccos(ap)
    an = mx.symbol.arccos(an)
    triplet_loss = mx.symbol.Activation(data = (ap-an+args.margin_m), act_type='relu')

    #body = ap*ap
    #body = 1.0-body
    #body = mx.symbol.sqrt(body)
    #body = body*sin_m
    #ap = ap*cos_m
    #ap = ap-body
    #triplet_loss = mx.symbol.Activation(data = (an-ap), act_type='relu')

    triplet_loss = mx.symbol.mean(triplet_loss)
    extra_loss = mx.symbol.MakeLoss(triplet_loss)
  elif args.loss_type==9: #coco loss
    centroids = []
    for i in range(args.per_identities):
      xs = mx.symbol.slice_axis(embedding, axis=0, begin=i*args.images_per_identity, end=(i+1)*args.images_per_identity)
github hpi-xnor / BMXNet-v2 / example / reinforcement-learning / ddpg / utils.py View on Github external
def define_qfunc(obs, act):

    net = mx.symbol.FullyConnected(
        data=obs,
        name="qfunc_fc1",
        num_hidden=32)
    net = mx.symbol.Activation(
        data=net,
        name="qfunc_relu1",
        act_type="relu")
    net = mx.symbol.FullyConnected(
        data=net,
        name="qfunc_fc2",
        num_hidden=32)
    net = mx.symbol.Activation(
        data=net,
        name="qfunc_relu2",
        act_type="relu")
    net = mx.symbol.Concat(net, act, name="qunfc_concat")
github msracver / Deformable-ConvNets / faster_rcnn / symbols / resnet_v1_101_rcnn_dcn.py View on Github external
rois, score = mx.contrib.sym.Proposal(
                    cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
                    output_score=True,
                    feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES),
                    ratios=tuple(cfg.network.ANCHOR_RATIOS),
                    rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
                    threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
            else:
                rois, score = mx.sym.Custom(
                    cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
                    output_score=True,
                    op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE,
                    scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS),
                    rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
                    threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
                group = mx.symbol.Group([rois, score])
        self.sym = group
        return group
github alibaba / x-deeplearning / xdl / xdl / python / training / saver.py View on Github external
def set_mx_output(self, backend_symbol):
        """Record the output op name when given an mxnet Symbol.

        Objects that are not ``mxnet.symbol.symbol.Symbol`` instances are
        ignored, leaving any previously stored op name untouched.
        """
        import mxnet
        if not isinstance(backend_symbol, mxnet.symbol.symbol.Symbol):
            return
        self._output_op_name = backend_symbol.name
    def set_tf_output(self, backend_symbol):
github TuSimple / mx-maskrcnn / rcnn / symbol / symbol_mask_fpn.py View on Github external
def get_resnet_conv_down(conv_feat):
    # C5 to P5, 1x1 dimension reduction to 256
    P5 = mx.symbol.Convolution(data=conv_feat[0], kernel=(1, 1), num_filter=256, name="P5_lateral")

    # P5 2x upsampling + C4 = P4
    P5_up   = mx.symbol.UpSampling(P5, scale=2, sample_type='nearest', workspace=512, name='P5_upsampling', num_args=1)
    P4_la   = mx.symbol.Convolution(data=conv_feat[1], kernel=(1, 1), num_filter=256, name="P4_lateral")
    P5_clip = mx.symbol.Crop(*[P5_up, P4_la], name="P4_clip")
    P4      = mx.sym.ElementWiseSum(*[P5_clip, P4_la], name="P4_sum")
    P4      = mx.symbol.Convolution(data=P4, kernel=(3, 3), pad=(1, 1), num_filter=256, name="P4_aggregate")

    # P4 2x upsampling + C3 = P3
    P4_up   = mx.symbol.UpSampling(P4, scale=2, sample_type='nearest', workspace=512, name='P4_upsampling', num_args=1)
    P3_la   = mx.symbol.Convolution(data=conv_feat[2], kernel=(1, 1), num_filter=256, name="P3_lateral")
    P4_clip = mx.symbol.Crop(*[P4_up, P3_la], name="P3_clip")
    P3      = mx.sym.ElementWiseSum(*[P4_clip, P3_la], name="P3_sum")
    P3      = mx.symbol.Convolution(data=P3, kernel=(3, 3), pad=(1, 1), num_filter=256, name="P3_aggregate")

    # P3 2x upsampling + C2 = P2
    P3_up   = mx.symbol.UpSampling(P3, scale=2, sample_type='nearest', workspace=512, name='P3_upsampling', num_args=1)
    P2_la   = mx.symbol.Convolution(data=conv_feat[3], kernel=(1, 1), num_filter=256, name="P2_lateral")
    P3_clip = mx.symbol.Crop(*[P3_up, P2_la], name="P2_clip")
    P2      = mx.sym.ElementWiseSum(*[P3_clip, P2_la], name="P2_sum")
    P2      = mx.symbol.Convolution(data=P2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="P2_aggregate")

    # P6 2x subsampling P5
    P6 = mx.symbol.Pooling(data=P5, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name='P6_subsampling')

    conv_fpn_feat = dict()
    conv_fpn_feat.update({"stride64":P6, "stride32":P5, "stride16":P4, "stride8":P3, "stride4":P2})