Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_subdivision_derived_fields(self):
    """An Address built from a subdivision code exposes derived metadata.

    Setting only ``subdivision_code='FR-59'`` should make the matching
    pycountry subdivision object and its name/type metadata available as
    attributes on the address.
    """
    address = Address(
        line1='31, place du Théatre',
        postal_code='59000',
        city_name='Lille',
        subdivision_code='FR-59')
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(
        address.subdivision, subdivisions.get(code='FR-59'))
    self.assertEqual(
        address.subdivision_code, 'FR-59')
    self.assertEqual(
        address.subdivision_name, 'Nord')
    self.assertEqual(
        address.subdivision_type_name, 'Metropolitan department')
    self.assertEqual(
        address.subdivision_type_id, 'metropolitan_department')
    # Type-specific aliases derived from the subdivision type.
    self.assertEqual(
        address.metropolitan_department, subdivisions.get(code='FR-59'))
    self.assertEqual(
        address.metropolitan_department_code, 'FR-59')
    self.assertEqual(
        address.metropolitan_department_name, 'Nord')
    # NOTE(review): the original source ended with a truncated, argument-less
    # `self.assertEquals(` call (a syntax error); that dangling call was
    # removed here. Restore the missing assertion from upstream if needed.
def test_subdivision_type_id_collision(self):
    """Derived subdivision metadata IDs must not clash with Address internals.

    Only whitelisted metadata attribute names may pre-exist on a plain
    Address instance; any other derived ID colliding with an existing
    attribute would shadow class internals.
    """
    plain = Address(
        line1='10, avenue des Champs Elysées',
        postal_code='75008',
        city_name='Paris',
        country_code='FR')
    for entry in subdivisions:
        # XXX ISO 3166-2 reuses the country type as subdivisions.
        # We really need to add proper support for these cases, as we did
        # for cities.
        if subdivision_type_id(entry) in ['country']:
            continue
        # Check each derived metadata ID against the Address attributes.
        for meta_id in subdivision_metadata(entry):
            check = (
                self.assertTrue
                if meta_id in Address.SUBDIVISION_METADATA_WHITELIST
                else self.assertFalse)
            check(hasattr(plain, meta_id))
def test_country_code_reconciliation(self):
    """Test reconciliation of ISO 3166-2 and ISO 3166-1 country codes.

    Aliased subdivision codes must normalize to their mapped country
    code; every non-aliased subdivision must normalize to the country
    code of the subdivision itself.
    """
    # Iterate key/value pairs directly instead of re-indexing via .keys().
    for subdiv_code, target_code in SUBDIVISION_ALIASES.items():
        if len(target_code) != 2:
            # The alias target is itself a subdivision code, not a
            # 2-letter country code: resolve it to its owning country.
            target_code = subdivisions.get(code=target_code).country_code
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(
            normalize_country_code(subdiv_code), target_code)
    # All remaining (non-aliased) subdivisions reconcile to their own
    # country code.
    for subdiv_code in set(
            imap(attrgetter('code'), subdivisions)).difference(
                SUBDIVISION_ALIASES):
        self.assertEqual(
            normalize_country_code(subdiv_code),
            subdivisions.get(code=subdiv_code).country_code)
# NOTE(review): fragment — this is the interior of a loop over CSV rows
# (`row`) inside a function that builds and returns `airports`; the loop
# header and the `airport_id` / `iata_code` / `wf` bindings are outside
# this view. Indentation appears to have been stripped from this chunk.
# Keep only the three airport size categories; skip heliports etc.
if airport_type != 'large_airport' and airport_type != 'small_airport' and airport_type != 'medium_airport': continue
# Reduce e.g. 'large_airport' to its size prefix ('large').
airport_type = airport_type.split("_")[0]
airport_name = row['name'].strip()
airport_coords = { 'lat': float(row['latitude_deg']), 'lon': float(row['longitude_deg']) }
airport_city = row['municipality'].strip()
airport_url = row['home_link'].strip()
airport_wiki = row['wikipedia_link'].strip()
airport_icao = row['ident'].upper().strip()
# Add a '.' after single uppercase letters
# (e.g. "John F Kennedy" -> "John F. Kennedy"); the lookahead avoids
# touching letters already followed by a word char, '-' or '.'.
airport_name = re.sub( r"\b([A-Z])(?![\w\-\.])", r"\1.", airport_name)
# First two characters of the ISO country field, upper-cased.
country_iso_code = row['iso_country'].strip().upper()[:2]
try :
country = pycountry.countries.get(alpha_2=country_iso_code)
country = country.name
except (KeyError, AttributeError) as err:
# Unknown/unmapped code: log it and fall back to the raw ISO code.
wf.logger.error("Error: {0} (Country: {1})".format(err, country_iso_code))
country = country_iso_code
airport_country = country
# Build our airport object.
airports.append( Map( id = airport_id, iata_code = iata_code, icao_code = airport_icao,
type = airport_type, name = airport_name, coords = airport_coords,
country = airport_country, city = airport_city, url = airport_url, wiki = airport_wiki ) )
# Sort the list by airport_type. Since it's only 'Large', 'Medium' and 'Small', they should be sorted correctly.
airports = sorted(airports, key=lambda k: (k.type, k.iata_code))
return airports
Arguments:
short_langs --- Array of strings. Each string is the short name of
a language. Should be 3 characters long (more should be fine as
well)
Returns:
A dictionnary: Keys are the short languages name, values are the
corresponding long languages names.
"""
long_langs = {}
for short_lang in short_langs:
try:
try:
country = pycountry.languages.get(terminology=short_lang[:3])
except KeyError:
country = pycountry.languages.get(bibliographic=short_lang[:3])
extra = None
if "_" in short_lang:
extra = short_lang.split("_")[1]
long_lang = country.name
if extra != None:
long_lang += " (%s)" % (extra)
long_langs[short_lang] = long_lang
except KeyError, exc:
print ("Warning: Long name not found for language '%s'."
% (short_lang))
print (" Exception was: %s" % (str(exc)))
print (" Will use short name as long name.")
long_langs[short_lang] = short_lang
return long_langs
def __determine_iso_639_3_key():
    """Figure out which pycountry keyword selects ISO 639-3 language codes.

    Different pycountry releases expose the lookup under different
    keyword names, so each known candidate is probed with a code
    ("deu") expected to exist in every ISO 639-3 table.
    """
    # Note: all ISO639-2T codes are ISO639-3 codes as well.
    candidates = ("alpha_3", "iso639_3_code", "terminology", "iso639_2T_code")
    for candidate in candidates:
        try:
            match = pycountry.languages.get(**{candidate: "deu"})
        except KeyError:
            # This pycountry version does not know that keyword.
            continue
        if match is not None:
            return candidate
    raise SystemExit("Could not determine pycountry iso_639_3 key")
def strip_lang_subdomains_from_netloc(netloc):
    """Drop a leading language subdomain ("fr." or "fr-ca.") from a netloc.

    The subdomain is stripped only when its component(s) resolve to
    ISO 3166-1 alpha-2 entries via pycountry; otherwise the netloc is
    returned unchanged.
    """
    # With at most one dot there is no subdomain to strip.
    if netloc.count('.') <= 1:
        return netloc

    head, _, rest = netloc.partition('.')

    def _is_country(code):
        # Truthy when pycountry knows the 2-letter code.
        return pycountry.countries.get(alpha_2=code.upper())

    if len(head) == 2:
        # Plain language subdomain, e.g. "fr.".
        if _is_country(head):
            return rest
    elif len(head) == 5 and '-' in head:
        # Language-region subdomain, e.g. "fr-ca.".
        lang, _, region = head.partition('-')
        if len(lang) == 2 and len(region) == 2:
            if _is_country(lang) and _is_country(region):
                return rest
    return netloc
# NOTE(review): fragment of a Django management command's handle() method —
# the opening `if` condition and the enclosing method/class are outside
# this view, and indentation appears to have been stripped.
self.stdout.write("Countries already populated; nothing to be done.")
sys.exit(0)
else:
raise CommandError(
"You already have countries in your database. This command "
"currently does not support updating existing countries.")
# Build one Country row per pycountry entry, then insert them in a
# single bulk query.
# NOTE(review): `country.alpha2` / `alpha3` are old pycountry attribute
# names (modern releases use `alpha_2` / `alpha_3`) — confirm against the
# pinned pycountry version.
countries = [
Country(
iso_3166_1_a2=country.alpha2,
iso_3166_1_a3=country.alpha3,
iso_3166_1_numeric=country.numeric,
printable_name=country.name,
name=getattr(country, 'official_name', ''),
is_shipping_country=options['is_shipping'])
for country in pycountry.countries]
Country.objects.bulk_create(countries)
self.stdout.write("Successfully added %s countries." % len(countries))
# Transifex API credentials.
# NOTE(review): empty, hard-coded credentials — these should come from
# environment variables rather than being edited into the source.
TRANSIFEX_USER = ""
TRANSIFEX_PW = ""

# Use context managers so both output files are closed even if the HTTP
# request or JSON decoding raises (the originals were never closed on error).
with open("app/src/main/assets/translators.csv", "w") as csvFile, \
        open("CONTRIBUTORS", "a") as contributorsFile:
    r = requests.get('http://www.transifex.com/api/2/project/antennapod/languages/',
                     auth=(TRANSIFEX_USER, TRANSIFEX_PW))
    for lang in r.json():
        # Merge every contributor role and sort case-insensitively.
        langContributers = lang['coordinators'] + lang['reviewers'] + lang['translators']
        langContributers = sorted(langContributers, key=str.lower)
        langCode = lang['language_code']
        try:
            # pycountry lookups raise LookupError for unknown codes;
            # catch that instead of a bare `except:` that hides bugs.
            langName = pycountry.languages.lookup(langCode).name
        except LookupError:
            try:
                # Retry with the bare language part of e.g. "pt_BR".
                langName = pycountry.languages.lookup(
                    langCode.split('_')[0]).name + ' (' + langCode + ')'
            except LookupError:
                langName = lang['language_code']
                print('\033[91mLanguage code not found:' + langCode + '\033[0m')
        # Semicolons are the CSV delimiter, so strip them from names.
        joinedTranslators = ', '.join(langContributers).replace(';', '')
        contributorsFile.write(langName + ": " + joinedTranslators + '\n')
        csvFile.write(langName + ';' + joinedTranslators + '\n')
        print(langName + ';' + joinedTranslators)
For each short language name, figures out its long name.
Arguments:
short_langs --- Array of strings. Each string is the short name of
a language. Should be 3 characters long (more should be fine as
well)
Returns:
A dictionnary: Keys are the short languages name, values are the
corresponding long languages names.
"""
long_langs = {}
for short_lang in short_langs:
try:
try:
country = pycountry.languages.get(terminology=short_lang[:3])
except KeyError:
country = pycountry.languages.get(bibliographic=short_lang[:3])
extra = None
if "_" in short_lang:
extra = short_lang.split("_")[1]
long_lang = country.name
if extra != None:
long_lang += " (%s)" % (extra)
long_langs[short_lang] = long_lang
except KeyError, exc:
print ("Warning: Long name not found for language '%s'."
% (short_lang))
print (" Exception was: %s" % (str(exc)))
print (" Will use short name as long name.")
long_langs[short_lang] = short_lang
return long_langs