def test_content_negotiation():
    "content negotiation - default - bibtex"
    res = cn.content_negotiation(ids='10.1126/science.169.3946.635')
    assert isinstance(res, str)
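# All of the snippets on this page call habanero's content negotiation client.
# A minimal, self-contained sketch of the default call (the only assumption is
# that, as the test above states, the default response format is BibTeX):
from habanero import cn

bib = cn.content_negotiation(ids='10.1126/science.169.3946.635')
print(bib)  # a BibTeX record as a single string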
if __name__ == '__main__':

    # successive reassignments; only the last DOI is actually used below
    doi = '10.1126/science.169.3946.635'
    doi = '10.1175/1520-0477(2001)082<1377:IOGPPT>2.3.CO;2k'
    doi = '10.1029/2002JD002499'
    doi = '10.1175/1525-7541(2003)004<1147:tvgpcp>2.0.co;2'

    aa = cn.content_negotiation(ids=doi, format="bibentry")

    cr = Crossref(mailto='xugzhi1987@gmail.com')
    works = cr.works(ids=doi)
    pprint(works)
    bb = crossRefToMetaDict(works['message'])
    print(bb)

    '''
    eti = Etiquette('MeiTing-Trunk', 'v0.1alpha', 'not published yet',
                    'xugzhi1987@gmail.com')
    print(str(eti))
    works = Works(etiquette=eti)
    #aa = works.doi('10.1590/0102-311x00133115')
    aa = works.doi(doi)
    pprint(aa)
    '''
def test_content_negotiation_style():
    "content negotiation - style"
    res_apa = cn.content_negotiation(ids=u'10.1126/science.169.3946.635', format="text", style="apa")
    res_ieee = cn.content_negotiation(ids=u'10.1126/science.169.3946.635', format="text", style="ieee")
    assert res_apa != res_ieee
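# The style argument takes CSL style names, so other formatted references can
# be requested the same way; a hedged sketch ("chicago-author-date" is assumed
# to be available in the CSL style repository Crossref uses):
from habanero import cn

ref = cn.content_negotiation(ids='10.1126/science.169.3946.635',
                             format='text', style='chicago-author-date')
print(ref)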
if link and 'doi.org' in link:
    doi = urlparse(link).path.strip('/')
# Extract the DOI using the title
else:
    results = cr.works(query_title=title, limit=1)
    if results['message']['total-results'] == 0 or \
            results['message']['items'][0]['title'][0].lower() != title.lower():
        log.warn(f'Could not find the doi for "{title}"')
        continue
    doi = results['message']['items'][0]['DOI']

try:
    reference = cn.content_negotiation(doi)
    lines.append(reference)
    references.append(re.sub('^@.*{', '', reference.split('\n')[0]).strip(','))
except HTTPError:
    log.warn(f'Could not create reference for "{title}"')

with open(os.path.join(output_dir, 'references.bib'), 'w') as f:
    f.write('\n\n'.join(lines))

return references
# Extract the DOI using the title
else:
    results = cr.works(query_bibliographic=title, limit=1)
    if (
        results["message"]["total-results"] == 0
        or results["message"]["items"][0]["title"][0].lower() != title.lower()
    ):
        log.warn(f'Could not find the doi for "{title}"')
        continue
    doi = results["message"]["items"][0]["DOI"]

try:
    reference = cn.content_negotiation(doi)
    lines.append(reference)
    references.append(re.sub("^@.*{", "", reference.split("\n")[0]).strip(","))
except HTTPError:
    log.warn(f'Could not create reference for "{title}"')

with open(os.path.join(output_dir, "references.bib"), "w") as f:
    f.write("\n\n".join(lines))

return references
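# Both variants above look the DOI up from the title via the Crossref works
# endpoint and then hand it to content negotiation. A self-contained sketch of
# just that lookup step (the mailto address and title are placeholders;
# query_bibliographic follows the newer of the two snippets):
from habanero import Crossref

cr = Crossref(mailto='you@example.org')
results = cr.works(query_bibliographic='An example article title', limit=1)
items = results['message']['items']
if items:
    print(items[0]['DOI'], items[0]['title'][0])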
def batch_doi2pmid(dois):
    """
    Resolve article PMIDs from DOIs by feeding each article citation to the
    PubMed advanced search.
    @param dois: list of DOIs to resolve
    @return: list of corresponding PMIDs
    """
    citations = []
    for doi in dois:
        if doi[-1] == '.':
            doi = doi[:-1]
        try:
            # what if one fails?!
            cit = cn.content_negotiation(ids=doi, format="citeproc-json")
            if isinstance(cit, list):
                for c in cit:
                    citations.append(c)
            else:
                citations.append(cit)
        except Exception as e:
            print(e)
            continue

    parsed_citations = []
    for x in citations:
        try:
            cit = json.loads(x)
        except TypeError as e:
            print(e)
            continue
        parsed_cit = {}
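# The citeproc-json format used above returns each record as a JSON string. A
# hedged sketch of pulling a few common fields out of one record (which fields
# are present depends on what Crossref holds for the DOI):
import json
from habanero import cn

raw = cn.content_negotiation(ids='10.1126/science.169.3946.635',
                             format='citeproc-json')
record = json.loads(raw)
print(record.get('title'))
print(record.get('container-title'))
print([a.get('family') for a in record.get('author', [])])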
def load_records(self, DOIs=None):
    """Load all crossref items as valid records"""
    records = cn.content_negotiation(ids=DOIs, format="citeproc-json")
    # Records might be a str or unicode (python 2)
    if not isinstance(records, list):
        records = [records]

    self.records = []
    for r in records:
        data = json.loads(r)
        try:
            record = self.to_record(data)
        except Exception:
            e, v, tb = sys.exc_info()
            msg = _(
                "An error occurred while loading the following DOI: {}. "
                "Check logs for details."
            ).format(data.get("DOI"))
            logger.error("{}, error: {} [{}], data: {}".format(msg, e, v, data))
            raise DOILoaderError(msg)
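# load_records() relies on habanero accepting a list of DOIs in one call; a
# single DOI comes back as a bare string, which is why the isinstance check is
# needed. A small sketch of the batch call (the DOIs are arbitrary examples):
import json
from habanero import cn

dois = ['10.1126/science.169.3946.635', '10.1029/2002JD002499']
raw = cn.content_negotiation(ids=dois, format='citeproc-json')
if not isinstance(raw, list):
    raw = [raw]
records = [json.loads(r) for r in raw]
print([r.get('DOI') for r in records])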
if item['score'] < best_item['score']:
    break
else:
    best_item = pick_best(title, best_item, item)

# Retrieve DOI and json item
doi = best_item['DOI']
res_json = best_item

# If the entry is invalid, return a score of 0
if 'author' not in res_json or not res_json['title']:
    print_score(0)
    return (None, res_json, 0)

# Retrieve metadata as bibtex entry
res_bib = cn.content_negotiation(ids=doi, format="bibentry")
# Repair characters that come back mis-encoded in the bibtex
res_bib = re.sub('ä', 'ä', res_bib)
res_bib = re.sub('Ă', 'Ö', res_bib)
res_bib = re.sub('รถ', 'ö', res_bib)
res_bib = re.sub('Ăź', 'ü', res_bib)
res_bib = re.sub('Ěo', 'ö', res_bib)
res_bib = re.sub('ďż˝', 'ø', res_bib)
res_bib = re.sub('ĂŤ', 'ë', res_bib)
db = bibtexparser.loads(res_bib)
assert len(db.entries) == 1
res_bib = db.entries[0]

# If article has subtitle(s), fix bibtex entry
subtitles = None
if 'subtitle' in res_json:
    subtitles = [x for x in res_json['subtitle'] if not str.isupper(x)]
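# A self-contained version of the bibentry-to-dict step used above, with the
# imports the excerpt assumes (the DOI is an arbitrary example; 'ID', 'year'
# and 'title' are bibtexparser's entry keys):
import bibtexparser
from habanero import cn

res_bib = cn.content_negotiation(ids='10.1029/2002JD002499', format='bibentry')
db = bibtexparser.loads(res_bib)
entry = db.entries[0]
print(entry.get('ID'), entry.get('year'), entry.get('title'))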