Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def add_query_params(url, params):
    """Return *url* with the truthy entries of *params* merged into its query.

    Existing query parameters with the same name are overwritten. Entries in
    *params* whose value is falsy (None, '', 0, ...) are skipped, so callers
    may pass optional parameters unconditionally.
    """
    parts = urlsplit(url)
    merged = parse_qs(parts.query)
    # Only truthy values are applied; parse_qs stores values as lists.
    merged.update((key, [val]) for key, val in iteritems(params) if val)
    return urlunsplit((parts.scheme, parts.netloc, parts.path,
                       urlencode(merged, doseq=True), parts.fragment))
def _is_effect(self, sign, direction = None, resources = None):
_sign = getattr(self, sign)
_resources = self._resources_set(resources)
return (
any(
bool(
_evidences
if not _resources else
_evidences & _resources
)
for _direction, _evidences in iteritems(_sign)
if not direction or direction == _direction
)
def get_params(paramstring):
    """
    Convert a URL-encoded paramstring to a Python dict

    :param paramstring: URL-encoded paramstring
    :type paramstring: str
    :return: parsed paramstring
    :rtype: Params
    """
    params = Params()
    for key, values in iteritems(parse_qs(paramstring)):
        # parse_qs always yields lists; unwrap single-value entries.
        params[key] = py2_decode(values if len(values) > 1 else values[0])
    return params
Hamiltonian.
Returns:
runtime_commutator: The time it takes to compute a commutator, after
partitioning the terms and normal ordering, using the regular
commutator function.
runtime_diagonal_commutator: The time it takes to compute the same
commutator using methods restricted to diagonal Coulomb operators.
"""
# NOTE(review): fragment of a timing benchmark; the enclosing function header
# (and the capture of the second elapsed time) lie outside this view.
# Build a normal-ordered jellium-model Hamiltonian on a 2D grid.
hamiltonian = normal_ordered(jellium_model(Grid(2, side_length, 1.),
                                           plane_wave=False))
part_a = FermionOperator.zero()
part_b = FermionOperator.zero()
# Toggle flag used to split the Hamiltonian terms alternately between the
# two halves. NOTE(review): contrary to the original comment ("add to a if
# 0"), the code adds to part_a when the flag is 1 and to part_b when it is 0.
add_to_a_or_b = 0
for term, coeff in iteritems(hamiltonian.terms):
    # Partition terms in the Hamiltonian into part_a or part_b
    if add_to_a_or_b:
        part_a += FermionOperator(term, coeff)
    else:
        part_b += FermionOperator(term, coeff)
    add_to_a_or_b ^= 1

# Time the generic commutator path (with normal ordering of the result).
start = time.time()
_ = normal_ordered(commutator(part_a, part_b))
end = time.time()
runtime_commutator = end - start

# Time the same commutator using the diagonal-Coulomb-restricted routine.
# NOTE(review): the elapsed-time assignment for this path is truncated below.
start = time.time()
_ = commutator_ordered_diagonal_coulomb_with_two_body_operator(
    part_a, part_b)
end = time.time()
def _prepare_long_request(url, api_query):
    """ Use requests.Request and requests.PreparedRequest to produce the
    body (and boundary value) of a multipart/form-data; POST request as
    detailed in https://www.mediawiki.org/wiki/API:Edit#Large_texts
    """
    utf8_fields = ('title', 'text', 'summary')
    partlist = []
    for field, value in iteritems(api_query):
        if field in utf8_fields:
            # title, text and summary values in the request
            # should be utf-8 encoded
            body = (None, value.encode('utf-8'),
                    'text/plain; charset=UTF-8',
                    {'Content-Transfer-Encoding': '8bit'})
        else:
            body = (None, value)
        partlist.append((field, body))
    return Request(url=url, files=partlist).prepare()
def to_dict(self):
    """Serialize this Inputs object, converting each parameter, artifact
    and volume entry to its dict form via the entry's own ``to_dict``."""
    result = super(Inputs, self).to_dict()
    for section in ("parameters", "artifacts", "volumes"):
        result[section] = {
            key: entry.to_dict()
            for key, entry in getattr(self, section).items()
        }
    return result
def aniresize(width, height, rid):
    """Rescale the animation frames registered for peer *rid*.

    If the peer has a 'bounds' entry, only that entry is rescaled;
    otherwise every frame except 'bbox', 'seg' and the empty key is
    rescaled from its original 'hints' points.
    """
    entry = peers[rid]
    if 'bounds' in entry:
        bounds = entry['bounds']
        bounds['sca_pts'][:] = bounds['hints']
        scale(bounds['sca_pts'], bounds['length'], width, height)
    else:
        for key, frame in entry.items():
            if key not in ('bbox', 'seg', ''):
                # Reset to the original points before scaling in place.
                frame['sca_pts'][:] = frame['hints']
                scale(frame['sca_pts'], frame['length'], width, height)
# NOTE(review): fragment of a larger method; the enclosing def is outside
# this view. Copies every set field of `value_to_flatten` onto `self`,
# skipping the reserved 'metadata' field and any field `self` lacks.
for desc in value_to_flatten.type_infos:
    if desc.name == "metadata":
        continue
    # Only copy fields that exist on self AND are actually set on the source.
    if hasattr(self, desc.name) and value_to_flatten.HasField(desc.name):
        setattr(self, desc.name, getattr(value_to_flatten, desc.name))
# NOTE(review): fragment — the start and end of the enclosing function are
# outside this view (the final `if` branch below is truncated).
# Build the descriptor list for an exported variant of `value`, shifting
# every field number up by one to make room for the metadata field.
descriptors = []
enums = {}
# Metadata is always the first field of exported data.
descriptors.append(
    rdf_structs.ProtoEmbedded(
        name="metadata", field_number=1, nested=ExportedMetadata))
for number, desc in sorted(iteritems(value.type_infos_by_field_number)):
    # Name 'metadata' is reserved to store ExportedMetadata value.
    if desc.name == "metadata":
        logging.debug("Ignoring 'metadata' field in %s.",
                      value.__class__.__name__)
        continue
    # Copy descriptors for primitive values as-is, just make sure their
    # field number is correct.
    if isinstance(desc, (rdf_structs.ProtoBinary, rdf_structs.ProtoString,
                         rdf_structs.ProtoUnsignedInteger,
                         rdf_structs.ProtoRDFValue, rdf_structs.ProtoEnum)):
        # Incrementing field number by 1, as 1 is always occupied by metadata.
        descriptors.append(desc.Copy(field_number=number + 1))
        # Presumably this branch records non-boolean enums into `enums`;
        # its body is truncated in this view — confirm against the full file.
        if (isinstance(desc, rdf_structs.ProtoEnum) and
                not isinstance(desc, rdf_structs.ProtoBoolean)):
def _recurse(v, reflective):
    """Recursively convert *v* into serialization-friendly primitives.

    Serializable instances use their own serialize(reflective=...); sets
    fall through as lists; lists and dicts are converted element-wise
    (dict keys stringified, order preserved in an OrderedDict); numpy
    arrays go through serialize_numpy_array(); other objects exposing a
    callable serialize()/to_dict() are converted via those; anything else
    is returned unchanged.
    """
    if isinstance(v, Serializable):
        return v.serialize(reflective=reflective)
    if isinstance(v, set):
        v = list(v)  # sets are not serializable; treat as lists below
    if isinstance(v, list):
        return [_recurse(item, reflective) for item in v]
    if isinstance(v, dict):
        pairs = ((str(key), _recurse(val, reflective))
                 for key, val in iteritems(v))
        return OrderedDict(pairs)
    if isinstance(v, np.ndarray):
        return serialize_numpy_array(v)
    serializer = getattr(v, "serialize", None)
    if callable(serializer):
        return serializer()
    converter = getattr(v, "to_dict", None)
    if callable(converter):
        return converter()
    return v
def _set_name(self):
    # Populate `self.name` with GO term short names: prefer an explicitly
    # provided mapping; otherwise merge the (id, name) pairs of every
    # entry in self._terms into a single dict.
    self._log('Collecting short names of GO terms.')
    self.name = (
        self._name_provided or
        dict(
            i
            for ii in self._terms.values()
            for i in iteritems(ii)
        )
    # NOTE(review): the assignment expression is truncated here in this
    # view — the closing parenthesis lies outside the visible lines.