How to use the six.moves.xrange function in six

To help you get started, we’ve selected a few six.moves.xrange examples, based on popular ways it is used in public projects.

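six.moves.xrange resolves to the built-in xrange on Python 2 and to range on Python 3, so the same lazy, memory-efficient integer sequence works under both interpreters. A minimal sketch of the basic pattern:

from six.moves import xrange  # xrange on Python 2, range on Python 3

# The sequence is generated lazily: no list of a million ints is built.
total = 0
for i in xrange(1000000):
    total += i
print(total)  # 499999500000

The excerpts below all follow this pattern, either importing the name with from six.moves import xrange or calling it fully qualified as six.moves.xrange(...).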

github miyosuda / TensorFlowAndroidMNIST / jni-build / jni / include / tensorflow / python / ops / data_flow_grad.py
def _DynamicStitchGrads(op, grad):
  """Gradients for DynamicStitch."""

  num_values = len(op.inputs) // 2
  indices_grad = [None] * num_values

  def AsInt32(x):
    return (x if op.inputs[0].dtype == dtypes.int32 else
            math_ops.cast(x, dtypes.int32))
  inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
  if isinstance(grad, ops.IndexedSlices):
    output_shape = array_ops.shape(op.outputs[0])
    output_rows = output_shape[0]
    grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
  values_grad = [array_ops.gather(grad, inp) for inp in inputs]
  return indices_grad + values_grad
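In this gradient, xrange(num_values) enumerates the index tensors in the first half of DynamicStitch's inputs (the op takes N index tensors followed by N value tensors, hence len(op.inputs) // 2). The forward op interleaves values at the given positions, e.g. tf.dynamic_stitch([[0, 2], [1, 3]], [[10, 20], [30, 40]]) evaluates to [10, 30, 20, 40], so the gradient simply gathers the incoming grad back out at each input's indices.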
github chainer / chainercv / tests / links_tests / model_tests / ssd_tests / test_multibox_loss.py
        gt_mb_labels = gt_mb_labels.array

        mb_locs = cuda.to_cpu(mb_locs)
        mb_confs = cuda.to_cpu(mb_confs)
        gt_mb_locs = cuda.to_cpu(gt_mb_locs)
        gt_mb_labels = cuda.to_cpu(gt_mb_labels)
        loc_loss = cuda.to_cpu(loc_loss.array)
        conf_loss = cuda.to_cpu(conf_loss.array)

        n_positive_total = 0
        expect_loc_loss = 0
        expect_conf_loss = 0
        for i in six.moves.xrange(gt_mb_labels.shape[0]):
            n_positive = 0
            negatives = []
            for j in six.moves.xrange(gt_mb_labels.shape[1]):
                loc = F.huber_loss(
                    mb_locs[np.newaxis, i, j],
                    gt_mb_locs[np.newaxis, i, j], 1).array
                conf = F.softmax_cross_entropy(
                    mb_confs[np.newaxis, i, j],
                    gt_mb_labels[np.newaxis, i, j]).array

                if gt_mb_labels[i, j] > 0:
                    n_positive += 1
                    expect_loc_loss += loc
                    expect_conf_loss += conf
                else:
                    negatives.append(conf)

            n_positive_total += n_positive
            if n_positive > 0:
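The two nested six.moves.xrange loops walk the batch and prior-box axes of gt_mb_labels, recomputing the Huber localization loss and softmax cross-entropy confidence loss one element at a time; positives are accumulated into the expected totals, while confidences of background boxes are collected in negatives for hard negative mining.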
github tensorflow / tensorflow / tensorflow / python / debug / lib / debug_utils.py
  ops = graph.get_operations()
  for op in ops:
    # Skip nodes without any output tensors.
    if not op.outputs:
      continue

    node_name = op.name
    op_type = op.type

    if node_name_pattern and not node_name_pattern.match(node_name):
      continue
    if op_type_pattern and not op_type_pattern.match(op_type):
      continue

    for slot in xrange(len(op.outputs)):
      if (tensor_dtype_pattern and
          not tensor_dtype_pattern.match(op.outputs[slot].dtype.name)):
        continue

      add_debug_tensor_watch(
          run_options,
          node_name,
          output_slot=slot,
          debug_ops=debug_ops,
          debug_urls=debug_urls,
          tolerate_debug_op_creation_failures=(
              tolerate_debug_op_creation_failures),
          global_step=global_step)
  run_options.debug_options.reset_disk_byte_usage = reset_disk_byte_usage
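Here xrange(len(op.outputs)) visits every output slot index of each graph operation so add_debug_tensor_watch can attach a watch per tensor; the lazy range avoids materializing an index list for each op when scanning a large graph on Python 2.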
github influxdata / influxdb-python / influxdb / client.py
def _batches(iterable, size):
    for i in xrange(0, len(iterable), size):
        yield iterable[i:i + size]
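_batches is a lazy chunker: xrange(0, len(iterable), size) yields start offsets size apart, and slicing produces the batches. A usage sketch (assuming the function is importable as defined above):

points = ["p1", "p2", "p3", "p4", "p5"]
for batch in _batches(points, 2):
    print(batch)
# ['p1', 'p2']
# ['p3', 'p4']
# ['p5']

influxdb-python uses this helper to split large point lists into write-sized batches without building the whole set of chunks up front.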
github Coalfire-Research / Slackor / impacket / impacket / examples / os_ident.py

        ipidclasses = {
            nmap2_seq.IPID_SEQ_INCR: 'I',
            nmap2_seq.IPID_SEQ_BROKEN_INCR: 'BI',
            nmap2_seq.IPID_SEQ_RPI: 'RI',
            nmap2_seq.IPID_SEQ_RD: 'RD',
            nmap2_seq.IPID_SEQ_CONSTANT: 'C',
            nmap2_seq.IPID_SEQ_ZERO: 'Z',
        }

        ipid_diffs = array.array('H', [0] * (self.seq_num_responses - 1))

        # Random and zero
        null_ipids = 1
        for i in xrange(1, self.seq_num_responses):
            prev_ipid = self.seq_responses[i-1].get_ipid()
            cur_ipid = self.seq_responses[i].get_ipid()

            if prev_ipid != 0 or cur_ipid != 0: 
                null_ipids = 0

            if prev_ipid <= cur_ipid:
                ipid_diffs[i-1] = cur_ipid - prev_ipid
            else:
                ipid_diffs[i-1] = (cur_ipid - prev_ipid + 65536) & 0xffff

            if self.seq_num_responses > 2 and ipid_diffs[i-1] > 20000:
                self.add_result('TI', ipidclasses[nmap2_seq.IPID_SEQ_RD])
                return

        if null_ipids:
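The (cur_ipid - prev_ipid + 65536) & 0xffff branch computes the difference modulo 2**16, so an IP ID counter that wraps past 65535 between probes still produces a small positive step. A standalone check of that arithmetic:

prev_ipid, cur_ipid = 0xFFF0, 0x0005  # counter wrapped past 65535
diff = (cur_ipid - prev_ipid + 65536) & 0xffff
assert diff == 21  # 16 steps to reach 65536, then 5 more
print(diff)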
github PaddlePaddle / models / fluid / PaddleNLP / neural_machine_translation / transformer / train.py
    if args.val_file_pattern is not None:
        test = test_context(exe, train_exe, dev_count)

    # the best cross-entropy value with label smoothing
    loss_normalizer = -(
        (1. - TrainTaskConfig.label_smooth_eps) *
        np.log(1. - TrainTaskConfig.label_smooth_eps) +
        TrainTaskConfig.label_smooth_eps *
        np.log(TrainTaskConfig.label_smooth_eps /
               (ModelHyperParams.trg_vocab_size - 1) + 1e-20))

    step_idx = 0
    init_flag = True

    logging.info("begin train")
    for pass_id in six.moves.xrange(TrainTaskConfig.pass_num):
        pass_start_time = time.time()

        if args.use_py_reader:
            pyreader.start()
            data_generator = None
        else:
            data_generator = train_data()

        batch_id = 0
        while True:
            try:
                feed_dict_list = prepare_feed_dict_list(data_generator,
                                                        init_flag, dev_count)
                outs = train_exe.run(
                    fetch_list=[sum_cost.name, token_num.name]
                    if step_idx % args.fetch_steps == 0 else [],
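six.moves.xrange(TrainTaskConfig.pass_num) drives the outer loop over training passes: each pass either restarts the py_reader pipeline or recreates the Python data generator, then runs batches until the reader is exhausted.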
github Theano / Theano / theano / scan_module / scan_utils.py
with the outputs that can not be removed.

    """
    non_removable = [o for i, o in enumerate(op.outputs) if i not in
                     out_idxs]
    required_inputs = gof.graph.inputs(non_removable)

    out_ins = []
    offset = op.n_seqs
    lim = op.n_mit_mot + op.n_mit_sot + op.n_sit_sot
    for idx in range(lim):
        n_ins = len(op.info['tap_array'][idx])
        out_ins += [op.inputs[offset:offset + n_ins]]
        offset += n_ins
    out_ins += [[] for k in xrange(op.n_nit_sot)]
    out_ins += [[op.inputs[offset + k]] for k in xrange(op.n_shared_outs)]

    added = True
    out_idxs_mask = [1 for idx in out_idxs]
    while added:
        added = False
        for pos, idx in enumerate(out_idxs):
            if (out_idxs_mask[pos] and
                 numpy.any([x in required_inputs for x in out_ins[idx]])):
                # This output is required ..
                out_idxs_mask[pos] = 0
                required_inputs += gof.graph.inputs([op.outputs[idx]])
                added = True

    required_outs = [x for i, x in enumerate(out_idxs)
                        if out_idxs_mask[i] == 0]
    not_required = [x for i, x in enumerate(out_idxs) if out_idxs_mask[i] == 1]
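The two xrange loops append one input list per remaining output: empty lists for the n_nit_sot outputs, which take no taps, and singleton lists for the n_shared_outs shared variables, mirroring the tap-based groups collected by the preceding range(lim) loop.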
github byungsook / vectornet / linenet / linenet_data.py
def _slur_image(img):
    # img = face(gray=True)
    # plt.imshow(img, cmap=plt.cm.gray)
    # plt.show()

    # gaussian blur
    gauss_denoised = ndimage.gaussian_filter(img, 0.1)
    # plt.imshow(gauss_denoised, cmap=plt.cm.gray)
    # plt.show()

    # duplicate
    num_duplicates = np.random.randint(low=FLAGS.noise_duplicate_min, high=FLAGS.noise_duplicate_max+1)
    blend = np.zeros(gauss_denoised.shape)
    for i in xrange(num_duplicates):
        # rotate
        rnd_offset = np.random.rand(1) * 2.0 - 1.0
        rotated_face = ndimage.rotate(gauss_denoised, rnd_offset * FLAGS.noise_rot_deg, reshape=False)
        
        # translate
        rnd_offset = np.random.rand(2) * 2.0 - 1.0
        shifted_face = ndimage.shift(rotated_face, rnd_offset * FLAGS.noise_trans_pix)
        
        # blend duplicates
        weight_min = 0.75
        weight = np.random.rand() * (1.0 - weight_min) + weight_min
        blend = blend + weight * shifted_face

    # add noise
    noisy = blend + FLAGS.noise_intensity * np.random.randn(*blend.shape)
    noisy = np.clip(noisy, a_min=0.0, a_max=255.0) / 255.0
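xrange(num_duplicates) repeats the rotate/shift/blend cycle: each iteration draws a rotation offset in [-noise_rot_deg, noise_rot_deg) and a translation in [-noise_trans_pix, noise_trans_pix), then adds the transformed copy with a random weight in [0.75, 1.0) before Gaussian noise is added and the result is clipped to [0, 255] and rescaled to [0, 1].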
github tensorflow / tensorflow / tensorflow / models / embedding / word2vec.py
def save_vocab(self):
    """Save the vocabulary to a file so the model can be reloaded."""
    opts = self._options
    with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
      for i in xrange(opts.vocab_size):
        vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
        f.write("%s %d\n" % (vocab_word,
                             opts.vocab_counts[i]))
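save_vocab writes one word count line per vocabulary entry, with xrange(opts.vocab_size) indexing the parallel vocab_words and vocab_counts arrays. A hedged sketch of reading the file back (load_vocab is hypothetical; only the line format comes from the snippet):

def load_vocab(path):
    """Parse the 'word count' lines written by save_vocab above."""
    words, counts = [], []
    with open(path) as f:
        for line in f:
            word, count = line.rsplit(" ", 1)
            words.append(word)
            counts.append(int(count))
    return words, counts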
github sisl / MADRL / madrl_environments / walker / multi_walker.py
def step(self, actions):
        act_vec = np.reshape(actions, (self.n_walkers, 4))
        assert len(act_vec) == self.n_walkers
        for i in xrange(self.n_walkers):
            self.walkers[i].apply_action(act_vec[i])

        self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)

        obs = [walker.get_observation() for walker in self.walkers]

        xpos = np.zeros(self.n_walkers)
        obs = []
        done = False
        rewards = np.zeros(self.n_walkers)

        for i in xrange(self.n_walkers):
            pos = self.walkers[i].hull.position
            x, y = pos.x, pos.y
            xpos[i] = x

            wobs = self.walkers[i].get_observation()
            nobs = []
            for j in [i - 1, i + 1]:
                # if no neighbor (for edge walkers)
                if j < 0 or j == self.n_walkers:
                    nobs.append(0.0)
                    nobs.append(0.0)
                else:
                    xm = (self.walkers[j].hull.position.x - x) / self.package_length
                    ym = (self.walkers[j].hull.position.y - y) / self.package_length
                    nobs.append(np.random.normal(xm, self.position_noise))
                    nobs.append(np.random.normal(ym, self.position_noise))