How to use bandit.core.constants.RANKING in bandit

To help you get started, we've selected a few bandit examples based on popular ways constants.RANKING is used in public projects.

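A pattern that recurs in the examples below is sizing zeroed score vectors from the ranking list and comparing severity or confidence levels by their position in it. A minimal sketch of that idea, with the list contents assumed for illustration rather than quoted from bandit (the authoritative values live in bandit/core/constants.py):

# Assumed stand-in for bandit.core.constants.RANKING; values are for
# illustration only.
RANKING = ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']

# One zeroed slot per rank, mirroring the [0] * len(constants.RANKING)
# pattern used throughout the examples on this page.
severity_scores = [0] * len(RANKING)
confidence_scores = [0] * len(RANKING)
print(severity_scores)    # [0, 0, 0, 0]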

From PyCQA/bandit: tests/unit/formatters/test_yaml.py
def test_report(self, get_issue_list):
        self.manager.files_list = ['binding.py']
        self.manager.scores = [{'SEVERITY': [0] * len(constants.RANKING),
                                'CONFIDENCE': [0] * len(constants.RANKING)}]

        get_issue_list.return_value = collections.OrderedDict(
            [(self.issue, self.candidates)])

        with open(self.tmp_fname, 'w') as tmp_file:
            b_json.report(self.manager, tmp_file, self.issue.severity,
                          self.issue.confidence)

        with open(self.tmp_fname) as f:
            data = yaml.load(f.read())
            self.assertIsNotNone(data['generated_at'])
            self.assertEqual(self.tmp_fname, data['results'][0]['filename'])
            self.assertEqual(self.issue.severity,
                             data['results'][0]['issue_severity'])
            self.assertEqual(self.issue.confidence,
                             data['results'][0]['issue_confidence'])
From PyCQA/bandit: tests/functional/test_functional.py
    def check_example(self, example_script, expect, ignore_nosec=False):
        '''
        :param example_script: Filename of an example script to test
        :param expect: dict with expected counts of issue types
        '''
        # reset scores for subsequent calls to check_example
        self.b_mgr.scores = []
        self.run_example(example_script, ignore_nosec=ignore_nosec)

        result = {
            'SEVERITY': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0},
            'CONFIDENCE': {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0}
        }

        for test_scores in self.b_mgr.scores:
            for score_type in test_scores:
                self.assertIn(score_type, expect)
                for idx, rank in enumerate(C.RANKING):
                    result[score_type][rank] = (test_scores[score_type][idx] //
                                                C.RANKING_VALUES[rank])

        self.assertDictEqual(expect, result)
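The floor division above recovers raw issue counts from bandit's weighted score vectors. A rough, self-contained illustration of that arithmetic, where the RANKING and RANKING_VALUES contents are assumptions made for the sketch rather than values quoted from bandit:

# Sketch only: assumed stand-ins for bandit.core.constants values.
RANKING = ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']
RANKING_VALUES = {'UNDEFINED': 1, 'LOW': 3, 'MEDIUM': 5, 'HIGH': 10}

# Each slot of a per-file score vector holds count * weight for its rank,
# so floor-dividing by the weight gives back the count the test compares
# against the expected dict.
severity_scores = [0, 6, 5, 20]    # hypothetical scores for one file
counts = {rank: severity_scores[idx] // RANKING_VALUES[rank]
          for idx, rank in enumerate(RANKING)}
print(counts)    # {'UNDEFINED': 0, 'LOW': 2, 'MEDIUM': 1, 'HIGH': 2}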
From PyCQA/bandit: tests/unit/core/test_issue.py
def test_issue_filter_severity(self):
        levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH]
        issues = [_get_issue_instance(l, bandit.HIGH) for l in levels]

        for level in levels:
            rank = constants.RANKING.index(level)
            for i in issues:
                test = constants.RANKING.index(i.severity)
                result = i.filter(level, bandit.UNDEFINED)
                self.assertTrue((test >= rank) == result)
From PyCQA/bandit: bandit/core/metrics.py
def __init__(self):
        self.data = dict()
        self.data['_totals'] = {'loc': 0, 'nosec': 0}

        # initialize 0 totals for criteria and rank; this will be reset later
        for rank in constants.RANKING:
            for criteria in constants.CRITERIA:
                self.data['_totals']['{0}.{1}'.format(criteria[0], rank)] = 0
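The nested loop builds one zeroed counter per criteria/rank pair. Assuming CRITERIA pairs the names 'SEVERITY' and 'CONFIDENCE' with a default value (an assumption for this sketch, not quoted from bandit), the totals dict ends up with keys like these:

# Sketch of the resulting keys; the CRITERIA contents are assumed.
RANKING = ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']
CRITERIA = [('SEVERITY', 0), ('CONFIDENCE', 0)]

totals = {'loc': 0, 'nosec': 0}
for rank in RANKING:
    for criteria in CRITERIA:
        totals['{0}.{1}'.format(criteria[0], rank)] = 0

print(sorted(totals))
# ['CONFIDENCE.HIGH', 'CONFIDENCE.LOW', ..., 'SEVERITY.UNDEFINED', 'loc', 'nosec']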
From PyCQA/bandit: bandit/core/issue.py
    def filter(self, severity, confidence):
        '''This function determines whether an issue should be included by
        comparing the severity and confidence rating of the issue to minimum
        thresholds specified in 'severity' and 'confidence' respectively.

        Formatters should call manager.filter_results() directly.

        This will return false if either the confidence or severity of the
        issue are lower than the given threshold values.

        :param severity: Severity threshold
        :param confidence: Confidence threshold
        :return: True/False depending on whether issue meets threshold

        '''
        rank = constants.RANKING
        return (rank.index(self.severity) >= rank.index(severity) and
                rank.index(self.confidence) >= rank.index(confidence))
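The same threshold check, pulled out as a standalone sketch so it can be run on its own; RANKING is spelled out here as an assumption, and meets_thresholds is a hypothetical helper name, not bandit API:

# Standalone sketch of the comparison logic in Issue.filter above.
RANKING = ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']

def meets_thresholds(severity, confidence, min_severity, min_confidence):
    # Both ratings must sit at or above their respective minimums.
    return (RANKING.index(severity) >= RANKING.index(min_severity) and
            RANKING.index(confidence) >= RANKING.index(min_confidence))

print(meets_thresholds('MEDIUM', 'HIGH', 'LOW', 'MEDIUM'))    # True
print(meets_thresholds('LOW', 'HIGH', 'MEDIUM', 'MEDIUM'))    # False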
From PyCQA/bandit: bandit/core/node_visitor.py
def __init__(self, fname, metaast, testset,
                 debug, nosec_lines, metrics):
        self.debug = debug
        self.nosec_lines = nosec_lines
        self.seen = 0
        self.scores = {
            'SEVERITY': [0] * len(constants.RANKING),
            'CONFIDENCE': [0] * len(constants.RANKING)
        }
        self.depth = 0
        self.fname = fname
        self.metaast = metaast
        self.testset = testset
        self.imports = set()
        self.import_aliases = {}
        self.tester = b_tester.BanditTester(
            self.testset, self.debug, nosec_lines)

        # in some cases we can't determine a qualified name
        try:
            self.namespace = b_utils.get_module_qualname_from_path(fname)
        except b_utils.InvalidModulePath:
            LOG.info('Unable to find qualified name for module: %s',
                     self.fname)
From PyCQA/bandit: bandit/core/result_store.py
def _check_severity(self, severity):
        '''Check severity level

        returns true if the issue severity is above the threshold.
        :param severity: the severity of the issue being checked
        :return: boolean result
        '''
        return constants.RANKING.index(severity) >= self.sev_level
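Note that sev_level here is stored as an integer index into RANKING (it is set and clamped in the report method shown in the next example), so only the issue's severity string needs converting. A tiny sketch with an assumed RANKING:

# Sketch: with a four-level RANKING, sev_level = 2 lets only MEDIUM and
# HIGH issues through.
RANKING = ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']
sev_level = 2
print(RANKING.index('HIGH') >= sev_level)    # True
print(RANKING.index('LOW') >= sev_level)     # False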
From PyCQA/bandit: bandit/core/result_store.py
'''Prints the contents of the result store

        :param scope: Which files were inspected
        :param scores: The scores awarded to each file in the scope
        :param lines: # of lines around the issue line to display (optional)
        :param sev_level: What level of severity to display (optional)
        :param conf_level: What level of confidence to display (optional)
        :param output_filename: File to output the results (optional)
        :param output_format: File type to output (csv|json|txt|xml)
        :return: -
        '''

        if not excluded_files:
            excluded_files = []

        if sev_level >= len(constants.RANKING):
            sev_level = len(constants.RANKING) - 1
        if conf_level >= len(constants.RANKING):
            conf_level = len(constants.RANKING) - 1

        self.sev_level = sev_level
        self.conf_level = conf_level
        self.max_lines = lines
        self.format = output_format
        self.out_file = output_filename

        try:
            self._write_report(files_list, scores, excluded_files)
        except IOError:
            print("Unable to write to file: %s" % self.out_file)