How to use the bandit.core.constants module in bandit

To help you get started, we’ve selected a few bandit examples that show popular ways bandit.core.constants is used in public projects.
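The examples below lean on two module-level values: constants.RANKING, an ordered list of rank names from least to most severe, and constants.CRITERIA, a list of (criteria name, default rank) pairs. A minimal sketch of what the module exposes (values taken from the bandit source at the time of writing; verify against your installed version):

from bandit.core import constants

# RANKING orders ranks from least to most severe; filtering code
# compares list positions via constants.RANKING.index(level).
print(constants.RANKING)
# ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']

# CRITERIA pairs each metrics criterion with its default rank; the
# names combine with ranks into metric keys such as 'SEVERITY.LOW'.
print(constants.CRITERIA)
# [('SEVERITY', 'UNDEFINED'), ('CONFIDENCE', 'UNDEFINED')]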

PyCQA/bandit: tests/unit/core/test_issue.py (view on GitHub)
def test_issue_filter_confidence(self):
        levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH]
        issues = [_get_issue_instance(bandit.HIGH, l) for l in levels]

        for level in levels:
            rank = constants.RANKING.index(level)
            for i in issues:
                test = constants.RANKING.index(i.confidence)
                result = i.filter(bandit.UNDEFINED, level)
                self.assertTrue((test >= rank) == result)
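RANKING does double duty here as an ordered scale: an issue passes the confidence filter exactly when the index of its confidence level in constants.RANKING is at least the index of the requested minimum level.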
PyCQA/bandit: tests/unit/formatters/test_json.py (view on GitHub)
lineno=2)]

        self.manager.out_file = self.tmp_fname

        self.issue.fname = self.context['filename']
        self.issue.lineno = self.context['lineno']
        self.issue.linerange = self.context['linerange']
        self.issue.test = self.check_name

        self.manager.results.append(self.issue)
        self.manager.metrics = metrics.Metrics()

        # mock up the metrics
        for key in ['_totals', 'binding.py']:
            self.manager.metrics.data[key] = {'loc': 4, 'nosec': 2}
            for (criteria, default) in constants.CRITERIA:
                for rank in constants.RANKING:
                    self.manager.metrics.data[key]['{0}.{1}'.format(
                        criteria, rank
                    )] = 0
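Crossing constants.CRITERIA with constants.RANKING pre-populates every metrics counter under a '{criteria}.{rank}' key (for example 'SEVERITY.LOW'), so formatters can assume all keys exist even when a count is zero.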
PyCQA/bandit: tests/functional/test_functional.py (view on GitHub)
def check_metrics(self, example_script, expect):
        '''A helper method to test the metrics being returned.

        :param example_script: Filename of an example script to test
        :param expect: dict with expected values of metrics
        '''
        self.b_mgr.metrics = metrics.Metrics()
        self.b_mgr.scores = []
        self.run_example(example_script)

        # test general metrics (excludes issue counts)
        m = self.b_mgr.metrics.data
        for k in expect:
            if k != 'issues':
                self.assertEqual(expect[k], m['_totals'][k])
        # test issue counts
        if 'issues' in expect:
            for (criteria, default) in C.CRITERIA:
                for rank in C.RANKING:
                    label = '{0}.{1}'.format(criteria, rank)
                    expected = 0
                    if expect['issues'].get(criteria, {}).get(rank):
                        expected = expect['issues'][criteria][rank]
                    self.assertEqual(expected, m['_totals'][label])
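The functional tests read issue counts back out of the '_totals' bucket using the same '{criteria}.{rank}' labels, defaulting any combination the expectation dict does not mention to zero.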
PyCQA/bandit: bandit/cli/main.py (view on GitHub)
    # initiate file discovery step within Bandit Manager
    b_mgr.discover_files(args.targets, args.recursive, args.excluded_paths)

    if not b_mgr.b_ts.tests:
        LOG.error('No tests would be run, please check the profile.')
        sys.exit(2)

    # initiate execution of tests within Bandit Manager
    b_mgr.run_tests()
    LOG.debug(b_mgr.b_ma)
    LOG.debug(b_mgr.metrics)

    # trigger output of results by Bandit Manager
    sev_level = constants.RANKING[args.severity - 1]
    conf_level = constants.RANKING[args.confidence - 1]
    b_mgr.output_results(args.context_lines,
                         sev_level,
                         conf_level,
                         args.output_file,
                         args.output_format,
                         args.msg_template)

    if (b_mgr.results_count(sev_filter=sev_level, conf_filter=conf_level) > 0
            and not args.exit_zero):
        sys.exit(1)
    else:
        sys.exit(0)
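Note how the reporting thresholds come straight from constants.RANKING: args.severity and args.confidence appear to be flag repeat counts (an assumption about the CLI; the snippet only shows the indexing), so subtracting one turns a count into a RANKING index. A minimal sketch of that mapping, assuming the four-level RANKING shown earlier:

# Illustration only: map a hypothetical flag repeat count to the
# minimum rank that will be reported.
RANKING = ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']
for count in (1, 2, 3, 4):
    print(count, '->', RANKING[count - 1])
# 1 -> UNDEFINED, 2 -> LOW, 3 -> MEDIUM, 4 -> HIGH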
PyCQA/bandit: bandit/formatters/screen.py (view on GitHub)
def get_metrics(manager):
    bits = []
    bits.append(header("\nRun metrics:"))
    for (criteria, _) in constants.CRITERIA:
        bits.append("\tTotal issues (by %s):" % (criteria.lower()))
        for rank in constants.RANKING:
            bits.append("\t\t%s: %s" % (
                rank.capitalize(),
                manager.metrics.data['_totals']['%s.%s' % (criteria, rank)]))
    return '\n'.join([str(bit) for bit in bits])
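The screen formatter iterates CRITERIA and RANKING in the same order the metrics were initialized, so every '_totals' key it reads is guaranteed to exist.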
PyCQA/bandit: bandit/core/node_visitor.py (view on GitHub)
def __init__(self, fname, metaast, testset,
                 debug, nosec_lines, metrics):
        self.debug = debug
        self.nosec_lines = nosec_lines
        self.seen = 0
        self.scores = {
            'SEVERITY': [0] * len(constants.RANKING),
            'CONFIDENCE': [0] * len(constants.RANKING)
        }
        self.depth = 0
        self.fname = fname
        self.metaast = metaast
        self.testset = testset
        self.imports = set()
        self.import_aliases = {}
        self.tester = b_tester.BanditTester(
            self.testset, self.debug, nosec_lines)

        # in some cases we can't determine a qualified name
        try:
            self.namespace = b_utils.get_module_qualname_from_path(fname)
        except b_utils.InvalidModulePath:
            LOG.info('Unable to find qualified name for module: %s',
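The snippet cuts off mid-statement, but the key use of constants here is at the top: the per-file score arrays are sized with len(constants.RANKING), giving each rank level its own counter slot for both severity and confidence.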
PyCQA/bandit: bandit/formatters/text.py (view on GitHub)
def get_metrics(manager):
    bits = []
    bits.append("\nRun metrics:")
    for (criteria, _) in constants.CRITERIA:
        bits.append("\tTotal issues (by %s):" % (criteria.lower()))
        for rank in constants.RANKING:
            bits.append("\t\t%s: %s" % (
                rank.capitalize(),
                manager.metrics.data['_totals']['%s.%s' % (criteria, rank)]))
    return '\n'.join(bits)
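This is the same loop as the screen formatter above, minus the colored header; both stay in lockstep with the metric keys by iterating constants.CRITERIA and constants.RANKING directly.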
PyCQA/bandit: bandit/core/config.py (view on GitHub)
def _init_plugin_name_pattern(self):
        '''Sets settings['plugin_name_pattern'] from default or config file.'''
        plugin_name_pattern = constants.plugin_name_pattern
        if self.get_option('plugin_name_pattern'):
            plugin_name_pattern = self.get_option('plugin_name_pattern')
        self._settings['plugin_name_pattern'] = plugin_name_pattern
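constants also carries plain configuration defaults: here BanditConfig seeds settings['plugin_name_pattern'] from constants.plugin_name_pattern and only overrides it when the config file supplies a value.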