Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def __init__(self, db, metadata_type_resource):
    """
    :type db: datacube.drivers.postgres._connections.PostgresDb
    :type metadata_type_resource: datacube.index._metadata_types.MetadataTypeResource
    """
    self._db = db
    self.metadata_type_resource = metadata_type_resource
    # Memoize the lookup methods on this instance. Wrapping the *bound*
    # methods here (instead of decorating at class level) ties each cache's
    # lifetime to the instance, so the cache cannot outlive it.
    for method_name in ("get_unsafe", "get_by_name_unsafe"):
        setattr(self, method_name, lru_cache()(getattr(self, method_name)))
def get_ng_axes(self):  # type: () -> Axes
    """
    Create an ngraph Axes object matching the shape of this value.

    The result is memoized on the instance. The previous
    ``@lru_cache(maxsize=1)`` keyed its cache on ``self``, which kept the
    most recent instance alive for the cache's lifetime (ruff B019) and
    evicted the entry whenever a *different* instance called.

    :raises NotImplementedError: for sparse tensors or symbolic (dim_param) shapes
    """
    cached = getattr(self, "_ng_axes_cache", None)
    if cached is not None:
        return cached
    if self.type.sparse_tensor_type.elem_type != onnx_pb2.TensorProto.UNDEFINED:
        raise NotImplementedError('Sparse tensors (SparseTensorTypeProto) not supported yet.')
    shape = []
    for dim in self.type.tensor_type.shape.dim:
        if dim.dim_param:
            raise NotImplementedError('Symbolic variable representation of '
                                      'tensor shape (dim_param) not supported yet.')
        shape.append(dim.dim_value)
    axes = ng.make_axes(axes=make_pos_axes(shape))
    self._ng_axes_cache = axes
    return axes
@cachetools.func.lru_cache(maxsize=3000000)
def single(document, model="en", embeddings_path=None, attributes=None, local=False):
    """Run the spaCy pipeline over *document* and serialize its sentences.

    :param document: raw text to process
    :param model: spaCy model name passed to ``get_nlp``
    :param embeddings_path: optional embeddings location passed to ``get_nlp``
    :param attributes: token attributes to extract (normalized via ``convert_attr``)
    :param local: if true, return the raw pipeline output instead of
        serialized sentences
    :return: list of sentences (each a list of per-token attribute dicts),
        or the raw ``nlp`` result when ``local`` is set
    """
    attributes = convert_attr(attributes)
    needs_root = "root" in attributes
    pipeline = get_nlp(model, embeddings_path)
    if local:
        return pipeline(document)
    serialized = []
    for sentence in pipeline(document).sents:
        tokens = [{attr: json_safety(token, attr) for attr in attributes}
                  for token in sentence]
        if needs_root:
            add_root_attribute(tokens, sentence)
        serialized.append(tokens)
    return serialized
def __init__(self, db, metadata_type_resource):
    """
    :type db: datacube.drivers.postgres._connections.PostgresDb
    :type metadata_type_resource: datacube.index._metadata_types.MetadataTypeResource
    """
    self._db = db
    self.metadata_type_resource = metadata_type_resource
    # Wrap the lookup methods in per-instance LRU caches. Binding the cache
    # to the *bound* method here (rather than decorating at class level)
    # keeps the cache from outliving this instance.
    self.get_unsafe = lru_cache()(self.get_unsafe)
    self.get_by_name_unsafe = lru_cache()(self.get_by_name_unsafe)
@lru_cache()
def resolve_project_id(session, project=None):
"""Resolve the ID of a project based on ID, name or the current context.
This helper encapsulates logic for determining a project in three
situations:
* If ``None`` is passed as the project, or if no project is passed, the
project will be inferred from the runtime context (i.e. environment
variables), and so will correspond to the 'current project' when run
inside Faculty platform.
* If a ``uuid.UUID`` or a string containing a valid UUID is passed, this
will be assumed to be the ID of the project and will be returned.
* If any other string is passed, the Faculty platform will be queried for
projects matching that name. If exactly one of that name is accessible to
the user, its ID will be returned, otherwise a ``ValueError`` will be
raised.
@lru_cache(maxsize=1)
def _lookup_team_roles():
    """Return a name -> TeamRole mapping of all team roles, computed once."""
    roles_by_name = {}
    for role in TeamRole.select():
        roles_by_name[role.name] = role
    return roles_by_name
def get_ng_variable(self):  # type: () -> Op
    """
    Create an ngraph variable node for this value.

    The variable is memoized on the instance. The previous
    ``@lru_cache(maxsize=1)`` keyed its cache on ``self``, which kept the
    most recent instance alive for the cache's lifetime (ruff B019) and
    evicted the entry whenever a *different* instance called.

    :return: AssignableTensorOp
    """
    cached = getattr(self, "_ng_variable_cache", None)
    if cached is not None:
        return cached
    axes = self.get_ng_axes()
    dtype = self.get_dtype()
    if self.has_initializer:
        initializer = self.get_initializer()
        variable = ng.variable(axes=axes, dtype=dtype,
                               initial_value=initializer.to_array()).named(self.name)
    else:
        variable = ng.variable(axes=axes, dtype=dtype).named(self.name)
    self._ng_variable_cache = variable
    return variable
@lru_cache()
def layer_covariance(layer1, layer2=None):
    """Computes the covariance matrix between the neurons of two layers. If only one
    layer is passed, computes the symmetric covariance matrix of that layer.

    :param layer1: object with an ``activations`` array of shape (datapoints, neurons)
    :param layer2: optional second layer; defaults to ``layer1`` (symmetric case)
    :return: (neurons1, neurons2) numpy covariance matrix
    """
    # Explicit None test: the previous `layer2 or layer1` would silently
    # discard a layer object that happened to be falsy (e.g. one defining
    # __bool__ or __len__).
    if layer2 is None:
        layer2 = layer1
    act1, act2 = layer1.activations, layer2.activations
    num_datapoints = act1.shape[0]  # cast to avoid numpy type promotion during division
    return np.matmul(act1.T, act2) / float(num_datapoints)
@lru_cache(maxsize=1)
def _get_route_data():
    """Fetch swagger route data (internal routes included, compact form), once."""
    route_data = swagger_route_data(include_internal=True, compact=True)
    return route_data
@lru_cache(maxsize=1)
def get_label_source_types():
    """Bidirectional id <-> name lookup for all LabelSourceType rows, cached.

    Each row contributes two entries: ``id -> name`` and ``name -> id``,
    inserted in that order (same insertion order as a pair of assignments).
    """
    mapping = {}
    for label_source in LabelSourceType.select():
        mapping.update({label_source.id: label_source.name,
                        label_source.name: label_source.id})
    return mapping