How to use the six.moves.range function in six

To help you get started, we’ve selected a few six.moves.range examples, based on popular ways it is used in public projects.
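Before the project examples, a minimal sketch of the call itself: six.moves.range resolves to the lazy xrange builtin on Python 2 and to the (equally lazy) range builtin on Python 3, so the same loop code runs on both versions without materializing a list.

import six

# xrange on Python 2, range on Python 3; both yield values lazily.
total = 0
for i in six.moves.range(5):
    total += i
print(total)  # 10

# Like the builtins, it accepts start, stop and step arguments.
print(list(six.moves.range(2, 10, 3)))  # [2, 5, 8]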

arnimarj / py-pointless / tests / python_api / test_primvector.py (View on GitHub)
def testPop(self):
		w = pointless.PointlessPrimVector('u32')
		self.assertRaises(IndexError, w.pop)

		w = pointless.PointlessPrimVector('u32', sequence = six.moves.range(1000))

		self.assert_(len(w) == 1000)

		for i in six.moves.range(1000):
			n = w.pop()
			self.assert_(n == 1000 - i - 1)

		self.assert_(len(w) == 0)
		self.assertRaises(IndexError, w.pop)
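Here six.moves.range(1000) does double duty: as the sequence argument it is consumed once to populate the vector, and as the loop iterable it drives the 1000 pops, in both cases without allocating a 1000-element list on Python 2.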
anisayari / Smarties / Smarties / rake.py (View on GitHub)
def adjoined_candidates_from_sentence(s, stoplist, min_keywords, max_keywords):
    # Initializes the candidate list to empty
    candidates = []
    # Splits the sentence to get a list of lowercase words
    sl = s.lower().split()
    # For each possible length of the adjoined candidate
    for num_keywords in range(min_keywords, max_keywords + 1):
        # For each possible starting position of the candidate
        for i in range(0, len(sl) - num_keywords):
            # Position i marks the first word of the candidate. Proceeds only if it's not a stopword
            if sl[i] not in stoplist:
                candidate = sl[i]
                # Initializes j (the pointer to the next word) to 1
                j = 1
                # Initializes the keyword counter, which counts the non-stopword words in the candidate
                keyword_counter = 1
                contains_stopword = False
                # Until the word count reaches the maximum number of keywords or the end is reached
                while keyword_counter < num_keywords and i + j < len(sl):
                    # Adds the next word to the candidate
                    candidate = candidate + ' ' + sl[i + j]
                    # If it's not a stopword, increase the keyword counter; if it is, turn on the flag
                    if sl[i + j] not in stoplist:
                        keyword_counter += 1
                    else:
                        contains_stopword = True
                    # Moves the pointer to the next word
                    j += 1
                # Keeps the candidate only if it contains at least one stopword,
                # does not end with one, and has exactly num_keywords keywords
                if contains_stopword and candidate.split()[-1] not in stoplist \
                        and keyword_counter == num_keywords:
                    candidates.append(candidate)
    return candidates
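A quick sanity check (the sentence and stoplist are made up for illustration): a candidate is kept only when it straddles at least one stopword and neither starts nor ends with one.

stoplist = {'of', 'the'}
print(adjoined_candidates_from_sentence(
    'range of the standard library functions', stoplist, 3, 3))
# ['range of the standard library']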
tensorflow / tensor2tensor / tensor2tensor / layers / common_layers.py (View on GitHub)
def smoothing_cross_entropy_factored_grad(op, dy):
  """Gradient function for smoothing_cross_entropy_factored."""
  a = op.inputs[0]
  b = op.inputs[1]
  labels = op.inputs[2]
  confidence = op.inputs[3]
  num_splits = 16
  vocab_size = shape_list(b)[0]
  labels = approximate_split(labels, num_splits)
  a = approximate_split(a, num_splits)
  dy = approximate_split(dy, num_splits)
  b_grad = None
  a_grad_parts = []
  deps = []
  for part in range(num_splits):
    with tf.control_dependencies(deps):
      logits = tf.matmul(a[part], b, transpose_b=True)
      output_part = smoothing_cross_entropy(logits, labels[part], vocab_size,
                                            confidence)
      a_grad_part, b_grad_part = tf.gradients(
          ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]])
      a_grad_parts.append(a_grad_part)
      if part > 0:
        b_grad += b_grad_part
      else:
        b_grad = b_grad_part
      deps = [b_grad, a_grad_part]
  a_grad = tf.concat(a_grad_parts, 0)
  return a_grad, b_grad, None, None
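The range(num_splits) loop implements sequential gradient accumulation: tf.control_dependencies(deps) chains the iterations so only one split's logits tensor is live at a time, b_grad is summed across parts, and the per-part gradients of a are concatenated back together at the end.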
jhuapl-boss / intern / ndio / remote / OCP.py (View on GitHub)
chunk_depth=16,  q_index=1)

            volume = numpy.zeros((
                    x_bounds[1]-x_bounds[0],
                    y_bounds[1]-y_bounds[0],
                    z_bounds[1]-z_bounds[0]))

            # TODO: Optimize.
            for chunk in downloaded_chunks:
                x_range, y_range, z_range = chunk[0]
                xi = 0
                for x in range(x_range[0], x_range[1]):
                    yi = 0
                    for y in range(y_range[0], y_range[1]):
                        zi = 0
                        for z in range(z_range[0], z_range[1]):
                            volume[x-x_range[0]][y-y_range[0]][z-z_range[0]] =\
                                chunk[1][zi][xi][yi]
                            zi += 1
                        yi += 1
                    xi += 1
            return volume
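The TODO is apt: the three nested range loops copy one voxel per iteration. A hedged sketch of a vectorized replacement, reusing the snippet's names and assuming each chunk payload is a numpy array laid out (z, x, y), as the [zi][xi][yi] indexing implies:

for chunk in downloaded_chunks:
    (x_range, y_range, z_range), data = chunk
    # Transpose (z, x, y) -> (x, y, z) and assign the whole block at once.
    volume[:x_range[1] - x_range[0],
           :y_range[1] - y_range[0],
           :z_range[1] - z_range[0]] = numpy.transpose(numpy.asarray(data), (1, 2, 0))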
facebookincubator / FCR / if / py / gen-py / fbnet / command_runner_asyncio / CommandRunner / Command.py (View on GitHub)
return
    if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
      fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
      self.checkRequired()
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.MAP:
          self.device_to_commands = {}
          (_ktype12, _vtype13, _size11 ) = iprot.readMapBegin() 
          if _size11 >= 0:
            for _i15 in six.moves.range(_size11):
              _key16 = Device()
              _key16.read(iprot)
              _val17 = []
              (_etype21, _size18) = iprot.readListBegin()
              if _size18 >= 0:
                for _i22 in six.moves.range(_size18):
                  _elem23 = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
                  _val17.append(_elem23)
              else: 
                while iprot.peekList():
                  _elem24 = iprot.readString().decode('utf-8') if UTF8STRINGS else iprot.readString()
                  _val17.append(_elem24)
              iprot.readListEnd()
              self.device_to_commands[_key16] = _val17
          else: 
            while iprot.peekMap():
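Generated Thrift deserializers like this one are a major consumer of six.moves.range: container sizes arrive off the wire, so decoding a large map or list iterates lazily instead of also building an index list on Python 2. The negative-size branches cover protocols that do not encode container lengths up front, falling back to the peekList/peekMap loops.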
openstack / neutron / neutron / plugins / ml2 / drivers / type_vlan.py (View on GitHub)
allocations = dict()
            allocs = vlanalloc.VlanAllocation.get_objects(ctx)
            for alloc in allocs:
                if alloc.physical_network not in allocations:
                    allocations[alloc.physical_network] = list()
                allocations[alloc.physical_network].append(alloc)

            # process vlan ranges for each configured physical network
            ranges = self.get_network_segment_ranges()
            for (physical_network,
                 vlan_ranges) in ranges.items():
                # determine current configured allocatable vlans for
                # this physical network
                vlan_ids = set()
                for vlan_min, vlan_max in vlan_ranges:
                    vlan_ids |= set(moves.range(vlan_min, vlan_max + 1))

                # remove from table unallocated vlans not currently
                # allocatable
                if physical_network in allocations:
                    for alloc in allocations[physical_network]:
                        try:
                            # see if vlan is allocatable
                            vlan_ids.remove(alloc.vlan_id)
                        except KeyError:
                            # it's not allocatable, so check if its allocated
                            if not alloc.allocated:
                                # it's not, so remove it from table
                                LOG.debug("Removing vlan %(vlan_id)s on "
                                          "physical network "
                                          "%(physical_network)s from pool",
                                          {'vlan_id': alloc.vlan_id,
                                           'physical_network': physical_network})
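The interesting line is the set union over moves.range(vlan_min, vlan_max + 1), which turns inclusive VLAN ranges into a set of allocatable IDs. A standalone sketch with made-up ranges:

from six import moves

vlan_ranges = [(100, 102), (200, 201)]
vlan_ids = set()
for vlan_min, vlan_max in vlan_ranges:
    # + 1 because VLAN ranges are inclusive at both ends
    vlan_ids |= set(moves.range(vlan_min, vlan_max + 1))
print(sorted(vlan_ids))  # [100, 101, 102, 200, 201]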
openpreserve / fido / fido / fido.py (View on GitHub)
eofbuffer = bofbuffer
            elif bytes_unread < self.bufsize:
                # The buffs overlap
                eofbuffer = bofbuffer[
                    bytes_unread:] + blocking_read(stream, bytes_unread)
            elif bytes_unread == self.bufsize:
                eofbuffer = blocking_read(stream, self.bufsize)
            elif seekable:  # easy case when we can just seek!
                stream.seek(length - self.bufsize)
                eofbuffer = blocking_read(stream, self.bufsize)
            else:
                # We have more to read and know how much.
                # n*bufsize + r = length
                (n, r) = divmod(bytes_unread, self.bufsize)
                # skip n-1*bufsize bytes
                for _ in range(1, n):
                    blocking_read(stream, self.bufsize)
                # skip r bytes
                blocking_read(stream, r)
                # and read the remaining bufsize bytes into the eofbuffer
                eofbuffer = blocking_read(stream, self.bufsize)
            return bofbuffer, eofbuffer, bytes_to_read
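The divmod arithmetic is easy to sanity-check: with bufsize = 4096 and bytes_unread = 10000, divmod gives n = 2 and r = 1808; range(1, n) skips one full buffer (4096 bytes), the next read skips r = 1808 more, and the final 4096 bytes become eofbuffer (4096 + 1808 + 4096 = 10000).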
cctbx / cctbx_project / scitbx / matrix / __init__.py (View on GitHub)
def min_index(self):
    result = None
    for i in range(len(self.elems)):
      if (result is None or self.elems[result] > self.elems[i]):
        result = i
    return result
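For comparison, the same linear scan can lean on the built-in min() over indices; a sketch that preserves the original's None-for-empty result and first-index tie-breaking (the strict > above also keeps the earliest minimum):

def min_index(self):
    if not self.elems:
        return None
    # Key each index by its element; min() returns the first minimal index.
    return min(range(len(self.elems)), key=self.elems.__getitem__)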
sfepy / sfepy / sfepy / discrete / problem.py (View on GitHub)
ls_conf : Struct, optional
            The linear solver options.
        nls_conf : Struct, optional
            The nonlinear solver options.
        force : bool
            If True, re-create the solver instances even if they already exist
            in `self.nls` attribute.
        """
        if (self.solver is None) or force:
            ls_conf = get_default(ls_conf, self.ls_conf,
                                  'you must set linear solver!')
            nls_conf = get_default(nls_conf, self.nls_conf,
                                   'you must set nonlinear solver!')

            fb_list = []
            for ii in range(100):
                fb_list.append((ls_conf.kind, ls_conf))
                if hasattr(ls_conf, 'fallback'):
                    ls_conf = self.solver_confs[ls_conf.fallback]
                else:
                    break

            if len(fb_list) > 1:
                ls = use_first_available(fb_list, context=self)
            else:
                ls = Solver.any_from_conf(ls_conf, context=self)

            ev = self.get_evaluator()

            if self.conf.options.get('ulf', False):
                self.nls_iter_hook = ev.new_ulf_iteration
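The range(100) is a bounded stand-in for while True: each pass appends the current linear-solver configuration and follows its fallback attribute, break exits when the chain ends, and the cap of 100 keeps a cyclic fallback chain from looping forever.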
sony / nnabla-examples / reduction / cifar10 / structured-sparsity / classification.py (View on GitHub)
# Create monitor.
    from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
    monitor = Monitor(args.monitor_path)
    monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
    monitor_err = MonitorSeries("Training error", monitor, interval=10)
    monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
    monitor_verr = MonitorSeries("Test error", monitor, interval=1)

    # Initialize DataIterator
    data = data_iterator(args.batch_size, True)
    vdata = data_iterator(args.batch_size, False)
    best_ve = 1.0
    ve = 1.0
    # Training loop.
    for i in range(args.max_iter):
        if i % args.val_interval == 0:
            # Validation
            ve = 0.0
            for j in range(int(n_valid / args.batch_size)):
                vimage.d, vlabel.d = vdata.next()
                vpred.forward(clear_buffer=True)
                ve += categorical_error(vpred.d, vlabel.d)
            ve /= int(n_valid / args.batch_size)
            monitor_verr.add(i, ve)
        if ve < best_ve:
            nn.save_parameters(os.path.join(
                args.model_save_path, 'params_%06d.h5' % i))
            best_ve = ve
        # Training forward
        image.d, label.d = data.next()
        solver.zero_grad()
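range(args.max_iter) drives the outer training loop, interleaving a validation pass every args.val_interval iterations and checkpointing parameters whenever the test error improves; a long-running counted loop like this is exactly where six.moves.range (i.e. xrange) mattered on Python 2.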