# Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.
def test_MultiSelect(env):
    """Smoke-test MultiSelect: rendering and result formatting.

    NOTE(review): an identical ``test_MultiSelect`` is defined again
    immediately below; this first definition is shadowed at import time
    and is never collected by pytest — confirm which copy should remain.
    """
    from clld.web.util.multiselect import MultiSelect

    widget = MultiSelect(env['request'], common.Language, 'x', url='/')
    widget.render()
    widget.render(selected=[common.Language.first()])
    widget.format_result(common.Language(id='x'))
def test_MultiSelect(env):
    """Exercise MultiSelect: plain render, render with a selection, and
    formatting of a single result object."""
    from clld.web.util.multiselect import MultiSelect

    select = MultiSelect(env['request'], common.Language, 'x', url='/')
    # Render once without and once with a pre-selected language.
    for render_kwargs in ({}, {'selected': [common.Language.first()]}):
        select.render(**render_kwargs)
    select.format_result(common.Language(id='x'))
def test_Data(db):
    """A Language's attached key/value rows are exposed via ``datadict()``."""
    from clld.db.models.common import Language, Language_data

    lang = Language(id='abc', name='Name')
    lang.data.append(Language_data(key='abstract', value='c'))

    # Persist and reload so datadict() reflects the flushed state.
    DBSession.add(lang)
    DBSession.flush()
    DBSession.refresh(lang)

    assert lang.datadict()['abstract'] == 'c'
def test_Base_jsondata(db):
    """``update_jsondata`` merges new keys rather than replacing jsondata."""
    lang = Language(id='abc', name='Name')
    VersionedDBSession.add(lang)
    VersionedDBSession.flush()

    lang.update_jsondata(a=1)
    assert 'a' in lang.jsondata

    lang.update_jsondata(b=1)
    # Keys from the first update must survive the second one.
    assert 'a' in lang.jsondata
    assert 'b' in lang.jsondata
    # The merged data is also visible in the JSON serialization.
    assert 'b' in lang.__json__(None)['jsondata']
def test_Language():
    """A bare Language (no identifiers) reports neither glottocode nor ISO code."""
    from clld.db.models.common import Language

    lang = Language(id='abc')
    assert lang.glottocode is None and lang.iso_code is None
def test_Data(mocker):
    """``Data.add`` registers new objects with the DB session, accepts
    pre-built objects via ``_obj``, and raises ValueError for an id
    containing a dot.

    NOTE(review): the name duplicates ``test_Data`` defined earlier in
    this file — presumably these come from different modules; verify.
    """
    from clld.db.models.common import Language
    from clld.scripts.util import Data

    fake_session = set()
    mocker.patch('clld.scripts.util.DBSession', fake_session)

    data = Data(jsondata={})
    data.add(Language, 'l', id='l', name='l')
    # Adding must have pushed something into the (mocked) session.
    assert fake_session

    # A pre-built object may be registered directly via _obj.
    data.add(Language, 'l2', _obj=5)

    with pytest.raises(ValueError):
        data.add(Language, 'l3', id='l.3')
def resourcemap(req):
"""Resource-specific JSON response listing all resource instances."""
rsc = req.params.get('rsc')
if rsc == 'language':
q = DBSession.query(
common.Language.id,
common.Language.name,
common.Language.latitude,
common.Language.longitude,
common.Identifier.type.label('itype'),
common.Identifier.name.label('iname')
).outerjoin(join(
common.LanguageIdentifier,
common.Identifier, and_(
common.LanguageIdentifier.identifier_pk == common.Identifier.pk,
common.Identifier.type != 'name')
)).filter(common.Language.active == true()).order_by(common.Language.id)
def resources():
for (id, name, lat, lon), rows in groupby(q, itemgetter(0, 1, 2, 3)):
identifiers = [
{'type': r.itype, 'identifier': r.iname.lower()
if r.itype.startswith('WALS') else r.iname}
return {'label': item.valueset.name}
class RefsCol(BaseRefsCol):
    """Column listing the sources attached to an item's ValueSet."""

    def get_obj(self, item):
        # The references hang off the valueset, not the value itself.
        return item.valueset
class Values(DataTable):
    """Default DataTable for Value objects."""

    __constraints__ = [Parameter, Contribution, Language]

    def base_query(self, query):
        """Return the base query with valueset references eagerly loaded.

        When the table is constrained to a language, the query is
        additionally joined with the parameter and filtered to that
        language's valuesets.
        """
        query = query.join(ValueSet).options(
            joinedload(
                Value.valueset
            ).joinedload(
                ValueSet.references
            ).joinedload(
                ValueSetReference.source
            )
        )
        if self.language:
            query = query.join(ValueSet.parameter)
            return query.filter(ValueSet.language_pk == self.language.pk)
        # BUG FIX: previously only the language-constrained branch
        # returned; the unconstrained path fell off the end and yielded
        # None, breaking any caller that iterates the result.
        return query
def main(args): # pragma: no cover
ldstatus = {}
lpks = DBSession.query(Language.pk) \
.filter(Language.active == True) \
.filter(Language.latitude != None) \
.filter(Languoid.level == LanguoidLevel.language)\
.order_by(Language.pk).all()
print(len(lpks))
sql = """\
select ls.source_pk, count(ls.language_pk) from languagesource as ls, ref as r
where ls.source_pk = r.pk and r.ca_doctype_trigger is null and r.ca_language_trigger is null
group by source_pk
"""
lcounts = {r[0]: r[1] for r in DBSession.execute(sql)}
# loop over active, established languages with geo-coords
for i, lpk in enumerate(lpks):
l = DBSession.query(Language).filter(Language.pk == lpk).one()