import os
import warnings
from collections import defaultdict

# `encoding`, `get_noqa_suppressions` and `_parse_pylint_informational`
# come from the surrounding package.


def get_suppressions(relative_filepaths, root, messages):
    """
    Given every message which was emitted by the tools, and the
    list of files to inspect, create a set of files to ignore,
    and a map of filepath -> line number -> codes to ignore.
    """
    paths_to_ignore = set()
    lines_to_ignore = defaultdict(set)
    messages_to_ignore = defaultdict(lambda: defaultdict(set))

    # first deal with 'noqa'-style suppression comments
    for filepath in relative_filepaths:
        abspath = os.path.join(root, filepath)
        try:
            file_contents = encoding.read_py_file(abspath).split('\n')
        except encoding.CouldNotHandleEncoding as err:
            # TODO: this output will break output formats such as JSON
            warnings.warn('{0}: {1}'.format(err.path, err.cause), ImportWarning)
            continue

        ignore_file, ignore_lines = get_noqa_suppressions(file_contents)
        if ignore_file:
            paths_to_ignore.add(filepath)
        lines_to_ignore[filepath] |= ignore_lines

    # now figure out which messages were suppressed by pylint
    pylint_ignore_files, pylint_ignore_messages = _parse_pylint_informational(messages)
    paths_to_ignore |= pylint_ignore_files
    for filepath, lines in pylint_ignore_messages.items():
        for line_number, codes in lines.items():
            for code in codes:
                messages_to_ignore[filepath][line_number].add(('pylint', code))

    return paths_to_ignore, lines_to_ignore, messages_to_ignore
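# The get_noqa_suppressions helper referenced above is not shown in this
# snippet. A minimal sketch of what such a helper might look like, assuming
# the conventional '# noqa' comment form (the names and regexes below are
# illustrative, not prospector's actual implementation):
import re

_NOQA_RE = re.compile(r'#\s*noqa', re.IGNORECASE)
_NOQA_FILE_RE = re.compile(r'#\s*flake8[:=]\s*noqa', re.IGNORECASE)


def get_noqa_suppressions(file_contents):
    """Return (ignore_whole_file, ignored_line_numbers) for the source lines."""
    ignore_whole_file = False
    ignore_lines = set()
    for line_number, line in enumerate(file_contents, start=1):
        if _NOQA_FILE_RE.search(line):
            # a file-level suppression silences the whole file
            ignore_whole_file = True
        if _NOQA_RE.search(line):
            ignore_lines.add(line_number)
    return ignore_whole_file, ignore_lines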
def run(self, found_files):
    warnings = []
    for filepath in found_files.iter_file_paths():
        # only scan plain-text files: guess_type() returns a
        # (type, encoding) pair, and a non-None encoding means the
        # file is compressed
        mimetype = mimetypes.guess_type(filepath)
        if mimetype[0] is None or not mimetype[0].startswith('text/') or mimetype[1] is not None:
            continue
        try:
            contents = read_py_file(filepath)
        except CouldNotHandleEncoding:
            continue
        for line, code, message in check_file_contents(contents):
            warnings.append({
                'line': line, 'code': code, 'message': message,
                'path': filepath,
            })

    messages = []
    for warning in warnings:
        # report paths relative to the project root
        path = warning['path']
        prefix = os.path.commonprefix([found_files.rootpath, path])
        loc = Location(path, module_from_path(path[len(prefix):]), '',
                       warning['line'], 0, absolute_path=True)
        msg = Message('dodgy', warning['code'], loc, warning['message'])
        messages.append(msg)
    return messages
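# The MIME filter above relies on mimetypes.guess_type() returning a
# (type, encoding) pair, where a non-None encoding marks a compressed
# file. A quick illustration of the three branches of the check:
import mimetypes

print(mimetypes.guess_type('settings.py'))   # ('text/x-python', None) -> scanned
print(mimetypes.guess_type('logo.png'))      # ('image/png', None)     -> skipped: not text/*
print(mimetypes.guess_type('notes.txt.gz'))  # ('text/plain', 'gzip')  -> skipped: compressed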
def run(self, found_files):
    messages = []
    for code_file in found_files.iter_module_paths():
        try:
            contents = read_py_file(code_file)
            tree = ast.parse(
                contents,
                filename=code_file,
            )
        except CouldNotHandleEncoding as err:
            messages.append(make_tool_error_message(
                code_file, 'mccabe', 'MC0000',
                message='Could not handle the encoding of this file: %s' % err.encoding,
            ))
            continue
        except SyntaxError as err:
            messages.append(make_tool_error_message(
                code_file, 'mccabe', 'MC0000',
                line=err.lineno, character=err.offset,
                message='Syntax Error',
            ))
            continue
        except TypeError:
            messages.append(make_tool_error_message(
                code_file, 'mccabe', 'MC0000',
                message='Unable to parse file',
            ))
            continue
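# A short demonstration of why the SyntaxError handler above copies
# err.lineno and err.offset into the tool message: ast.parse() records the
# error position on the exception (exact offsets vary by Python version,
# so the values below are indicative):
import ast

try:
    ast.parse('def broken(:\n    pass\n', filename='example.py')
except SyntaxError as err:
    print(err.lineno, err.offset)  # e.g. 1 12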
def scavenge(self, _=None):
    # The argument is a list of paths, but we don't care about that,
    # as we use the found_files object. The argument is here to
    # explicitly acknowledge that we are overriding the
    # Vulture.scavenge method.
    for module in self._files.iter_module_paths():
        try:
            module_string = read_py_file(module)
        except CouldNotHandleEncoding as err:
            self._internal_messages.append(make_tool_error_message(
                module, 'vulture', 'V000',
                message='Could not handle the encoding of this file: %s' % err.encoding,
            ))
            continue
        self.file = module
        self.filename = module
        try:
            # newer vulture releases take an explicit filename...
            self.scan(module_string, filename=module)
        except TypeError:
            # ...older ones only accept the source string
            self.scan(module_string)
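# The try/except TypeError above is a feature-detection pattern: newer
# vulture releases accept a filename keyword, older ones do not. A
# hypothetical, generic sketch of the same pattern:
def call_with_optional_kwargs(func, *args, **maybe_kwargs):
    """Try the newer signature first, then fall back to the older one."""
    try:
        return func(*args, **maybe_kwargs)
    except TypeError:
        # older versions reject the extra keyword arguments
        return func(*args)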
import sys
import tokenize


def read_py_file(filepath):
    if sys.version_info < (3, 0):
        # Python 2: read in universal-newline text mode
        return open(filepath, 'rU').read()
    else:
        # see https://docs.python.org/3/library/tokenize.html#tokenize.detect_encoding
        # first just see if the file is properly encoded
        try:
            with open(filepath, 'rb') as f:
                tokenize.detect_encoding(f.readline)
        except SyntaxError as err:
            # SyntaxError is raised:
            #   (1) in badly authored files (a comment line contains non-utf8 bytes)
            #   (2) when a coding is specified, but is wrong
            #   (3) when no coding is specified, and the default 'utf8'
            #       fails to decode
            #   (4) when the encoding specified by a PEP 263 declaration does
            #       not match the encoding detected by inspecting the BOM
            raise CouldNotHandleEncoding(filepath, err)

        try:
            return tokenize.open(filepath).read()
        except UnicodeDecodeError as err:
            # raised if, for example, utf-8 is declared but latin-1 is
            # actually used, with bytes like \xe9 appearing
            # (see http://stackoverflow.com/a/5552623)
            raise CouldNotHandleEncoding(filepath, err)
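# A small demonstration of the tokenize behaviour relied on above, assuming
# a scratch file 'demo.py' can be written: tokenize honours PEP 263 coding
# declarations, so a latin-1 file is decoded correctly.
import tokenize

with open('demo.py', 'w', encoding='latin-1') as f:
    f.write('# -*- coding: latin-1 -*-\nname = "caf\xe9"\n')

with open('demo.py', 'rb') as f:
    detected, _lines = tokenize.detect_encoding(f.readline)
print(detected)                          # 'iso-8859-1'
print(tokenize.open('demo.py').read())   # decodes using the declared coding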
            # excerpt from the pep257 tool's run() method: inside the
            # per-file try block, each error reported by pep257 is
            # converted into a prospector Location/Message pair
            location = Location(
                path=code_file,
                module=None,
                function='',
                line=error.line,
                character=0,
                absolute_path=True,
            )
            message = Message(
                source='pep257',
                code=error.code,
                location=location,
                # pep257 messages look like 'D203: 1 blank line required ...';
                # keep only the description after the code prefix
                message=error.message.partition(':')[2].strip(),
            )
            messages.append(message)
        except CouldNotHandleEncoding as err:
            messages.append(make_tool_error_message(
                code_file, 'pep257', 'D000',
                message='Could not handle the encoding of this file: %s' % err.encoding,
            ))
            continue
        except AllError as exc:
            # pep257's Parser.parse_all method raises AllError when an
            # attempt to analyse the __all__ definition has failed; this
            # occurs when __all__ is too complex to be parsed
            messages.append(make_tool_error_message(
                code_file, 'pep257', 'D000',
                line=1, character=0,
                message=exc.args[0],
            ))
            continue
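# make_tool_error_message is used by several of the tools above but not
# shown in these snippets. A plausible sketch, assuming it simply wraps the
# Location and Message classes seen elsewhere with sensible defaults (the
# signature here is inferred from the call sites, not taken from prospector
# itself):
def make_tool_error_message(filepath, source, code,
                            message='Tool error', line=0, character=0):
    location = Location(
        path=filepath,
        module=None,
        function=None,
        line=line,
        character=character,
        absolute_path=True,
    )
    return Message(source=source, code=code, location=location, message=message)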