Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
full = os.path.join(path, entry)
testcase_name = None
if os.path.isfile(full):
testcase_name = make_name(full, parser, backend, version, file_format, entry)
elif os.path.isdir(full):
if parser == 'BaseParser':
continue # skip separate files for the BaseParser
full = os.path.join(full, 'spec', 'swagger.%s' % (file_format))
if os.path.isfile(full):
testcase_name = make_name(full, parser, backend, version, file_format, entry)
if testcase_name:
dirname = os.path.dirname(full)
dirname = dirname.replace('\\', '\\\\')
from prance.util import url
absurl = url.absurl(os.path.abspath(full)).geturl()
code = """
@pytest.mark.xfail
def %s():
import os
cur = os.getcwd()
os.chdir('%s')
from prance import %s
try:
parser = %s('%s', backend = '%s')
finally:
os.chdir(cur)
""" % (testcase_name, dirname, parser, parser, absurl, backend)
print(code)
exec(code, globals())
def test_convert_url():
    """Converting the petstore spec via a python:// URL yields OpenAPI 3.x YAML."""
    from prance.util import url

    source = url.absurl('python://tests/specs/petstore.yaml')
    converted, content_type = convert.convert_url(source)

    # The converter must report a YAML content type for this input.
    assert 'yaml' in content_type

    # The converted document must parse without raising.
    from prance.util import formats
    spec = formats.parse_spec(converted, content_type = content_type)

    # The result must declare itself as an OpenAPI 3.x document.
    assert 'openapi' in spec
    assert spec['openapi'].startswith('3.')
if entry.startswith('.'): # skip hidden files
continue
if os.path.isfile(full):
testcase_name = make_name(full, parser, backend, version, file_format, entry)
elif os.path.isdir(full):
if parser == 'BaseParser':
continue # skip separate files for the BaseParser
full = os.path.join(full, 'spec', 'swagger.%s' % (file_format))
if os.path.isfile(full):
testcase_name = make_name(full, parser, backend, version, file_format, entry)
if testcase_name:
dirname = os.path.dirname(full)
dirname = dirname.replace('\\', '\\\\')
from prance.util import url
absurl = url.absurl(os.path.abspath(full)).geturl()
code = """
@pytest.mark.xfail
def %s():
import os
cur = os.getcwd()
os.chdir('%s')
from prance import %s
try:
parser = %s('%s', backend = '%s')
finally:
os.chdir(cur)
""" % (testcase_name, dirname, parser, parser, absurl, backend)
print(code)
exec(code, globals())
def test_absurl_http():
    """An already-absolute HTTP URL must round-trip through absurl unchanged."""
    original = 'http://foo.bar/asdf/#lala/quux'
    parsed = url.absurl(original)
    assert parsed.geturl() == original
def test_urlresource():
    """urlresource must strip query and fragment, keeping scheme/host/path."""
    with_extras = url.absurl('http://foo.bar/asdf?some=query#myfrag')
    resource = url.urlresource(with_extras)
    assert resource == 'http://foo.bar/asdf'
:param callable recursion_limit_handler: [optional] A callable that
gets invoked when the recursion_limit is reached. Defaults to
raising ResolutionError. Receives the recursion_limit as the
first parameter, and the parsed reference URL as the second.
"""
import copy
self.specs = copy.deepcopy(specs)
self.url = url
self.__reclimit = options.get('recursion_limit', 1)
self.__reclimit_handler = options.get('recursion_limit_handler',
default_reclimit_handler)
self.__reference_cache = options.get('reference_cache', {})
if self.url:
self.parsed_url = _url.absurl(self.url)
self._url_key = _url.urlresource(self.parsed_url)
# If we have a url, we want to add ourselves to the reference cache
# - that creates a reference loop, but prevents child resolvers from
# creating a new resolver for this url.
if self.specs:
self.__reference_cache[self._url_key] = self.specs
else:
self.parsed_url = self._url_key = None
def _dereference(self, ref_url, obj_path, recursions):
"""
Dereference the URL and object path.
Returns the dereferenced object.
:param mixed ref_url: The URL at which the reference is located.
:param list obj_path: The object path within the URL resource.
:param tuple recursions: A recursion stack for resolving references.
:return: A copy of the dereferenced value, with all internal references
resolved.
"""
# In order to start dereferencing anything in the referenced URL, we have
# to read and parse it, of course.
# fetch_url is given the shared reference cache, presumably so repeated
# references into the same resource avoid re-fetching — confirm in
# prance.util.url.
contents = _url.fetch_url(ref_url, self.__reference_cache)
# In this inner parser's specification, we can now look for the referenced
# object.
# An empty object path means the reference targets the whole document.
value = contents
if len(obj_path) != 0:
from prance.util.path import path_get
try:
value = path_get(value, obj_path)
except KeyError:
# Surface a missing path as a resolution error naming the offending URL.
raise _url.ResolutionError('Cannot resolve reference "%s"!'
% (ref_url.geturl(), ))
# Deep copy value; we don't want to create recursive structures
import copy
value = copy.deepcopy(value)
def convert(url_or_path, output_file):
    """
    Convert the given spec to OpenAPI 3.x.y.
    The conversion uses the web API provided by mermade.org.uk to perform the
    conversion. As long as that service is kept up-to-date and you have an
    internet connection, conversion should work and should convert to the latest
    version of the specs.
    """
    from .util import url
    import os

    # Resolve the input relative to the current working directory before
    # handing it off to the converter.
    spec_url = url.absurl(url_or_path, os.getcwd())

    from .convert import convert_url
    content, content_type = convert_url(spec_url)

    # Emit the result: stdout when no output file was requested, a file
    # on disk otherwise.
    if output_file is None:
        click.echo(content)
    else:
        from .util import fs
        fs.write_file(output_file, content)
gets invoked when the recursion_limit is reached. Defaults to
raising ResolutionError. Receives the recursion_limit as the
first parameter, and the parsed reference URL as the second.
"""
import copy
self.specs = copy.deepcopy(specs)
self.url = url
self.__reclimit = options.get('recursion_limit', 1)
self.__reclimit_handler = options.get('recursion_limit_handler',
default_reclimit_handler)
self.__reference_cache = options.get('reference_cache', {})
if self.url:
self.parsed_url = _url.absurl(self.url)
self._url_key = _url.urlresource(self.parsed_url)
# If we have a url, we want to add ourselves to the reference cache
# - that creates a reference loop, but prevents child resolvers from
# creating a new resolver for this url.
if self.specs:
self.__reference_cache[self._url_key] = self.specs
else:
self.parsed_url = self._url_key = None
def _dereferencing_iterator(self, base_url, partial, path, recursions):
"""
Iterate over a partial spec, dereferencing all references within.
Yields the resolved path and value of all items that need substituting.
:param mixed base_url: URL that the partial specs is located at.
:param dict partial: The partial specs to work on.
:param tuple path: The parent path of the partial specs.
:param tuple recursions: A recursion stack for resolving references.
"""
from .iterators import reference_iterator
for _, refstring, item_path in reference_iterator(partial):
# Split the reference string into parsed URL and object path
ref_url, obj_path = _url.split_url_reference(base_url, refstring)
# The reference path is the url resource and object path
ref_path = (_url.urlresource(ref_url), tuple(obj_path))
# Count how often the reference path has been recursed into.
from collections import Counter
rec_counter = Counter(recursions)
next_recursions = recursions + (ref_path,)
if rec_counter[ref_path] >= self.__reclimit:
# The referenced value may be produced by the handler, or the handler
# may raise, etc.
ref_value = self.__reclimit_handler(self.__reclimit, ref_url)
else:
# The referenced value is to be used, but let's copy it to avoid
# building recursive structures.