How to use the bleach.utils.alphabetize_attributes function in bleach

To help you get started, we've selected a few bleach examples based on popular ways it is used in public projects.
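
Before the project examples, a note on what the function does: alphabetize_attributes takes an html5lib-style attribute dict, keyed by (namespace, name) tuples, and returns the attributes sorted alphabetically by name, which keeps serialized output deterministic. A minimal sketch, assuming an older bleach release (3.x) where the helper is importable from bleach.utils:

from bleach.utils import alphabetize_attributes

# html5lib-style attributes: keys are (namespace, name) tuples
attrs = {
    (None, 'title'): 'Example',
    (None, 'href'): 'https://example.com',
    (None, 'class'): 'link',
}

ordered = alphabetize_attributes(attrs)
print(list(ordered))
# [(None, 'class'), (None, 'href'), (None, 'title')]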


From the mozilla/bleach repo, bleach/sanitizer.py:
                # Drop href and xlink:href attr for svg elements with non-local IRIs
                if (None, token['name']) in self.svg_allow_local_href:
                    if namespaced_name in [
                            (None, 'href'), (html5lib_shim.namespaces['xlink'], 'href')
                    ]:
                        if re.search(r'^\s*[^#\s]', val):
                            continue

                # If it's a style attribute, sanitize it
                if namespaced_name == (None, 'style'):
                    val = self.sanitize_css(val)

                # At this point, we want to keep the attribute, so add it in
                attrs[namespaced_name] = val

            token['data'] = alphabetize_attributes(attrs)

        return token
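
The alphabetize_attributes call at the end of this snippet is why clean() emits attributes in a stable order regardless of how they appear in the input. A small illustration; the tag and attribute lists here are made-up inputs, and the expected output assumes bleach 3.x defaults:

import bleach

# Attributes arrive in the order title, href, class ...
dirty = '<a title="docs" href="https://example.com" class="ext">docs</a>'
print(bleach.clean(dirty, tags=['a'], attributes=['class', 'href', 'title']))
# ... but serialize alphabetized:
# <a class="ext" href="https://example.com" title="docs">docs</a>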

From the mozilla/bleach repo, bleach/linkifier.py:
                    attrs = {
                        (None, 'href'): 'mailto:%s' % match.group(0),
                        '_text': match.group(0)
                    }
                    attrs = self.apply_callbacks(attrs, True)

                    if attrs is None:
                        # Just add the text--but not as a link
                        new_tokens.append(
                            {'type': 'Characters', 'data': match.group(0)}
                        )

                    else:
                        # Add an "a" tag for the new link
                        _text = attrs.pop('_text', '')
                        attrs = alphabetize_attributes(attrs)
                        new_tokens.extend([
                            {'type': 'StartTag', 'name': 'a', 'data': attrs},
                            {'type': 'Characters', 'data': force_unicode(_text)},
                            {'type': 'EndTag', 'name': 'a'}
                        ])
                    end = match.end()

                if new_tokens:
                    # Yield the adjusted set of tokens and then continue
                    # through the loop
                    if end < len(text):
                        new_tokens.append({'type': 'Characters', 'data': text[end:]})

                    for new_token in new_tokens:
                        yield new_token
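
This branch handles matched email addresses: the address becomes a mailto: link, callbacks get a chance to adjust the attributes, and the surviving attributes are alphabetized before the StartTag token is emitted. Roughly, from the caller's side (assuming bleach 3.x, where parse_email=True enables this path):

import bleach

# Email matching is off by default; parse_email=True turns it on
print(bleach.linkify('reach me at jane@example.com', parse_email=True))
# reach me at <a href="mailto:jane@example.com">jane@example.com</a>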

From the mozilla/bleach repo, bleach/sanitizer.py:
        :returns: token or list of tokens

        """
        token_type = token['type']
        if token_type in ['StartTag', 'EndTag', 'EmptyTag']:
            if token['name'] in self.allowed_elements:
                return self.allow_token(token)

            elif self.strip_disallowed_elements:
                return None

            else:
                if 'data' in token:
                    # Alphabetize the attributes before calling .disallowed_token()
                    # so that the resulting string is stable
                    token['data'] = alphabetize_attributes(token['data'])
                return self.disallowed_token(token)

        elif token_type == 'Comment':
            if not self.strip_html_comments:
                return token
            else:
                return None

        elif token_type == 'Characters':
            return self.sanitize_characters(token)

        else:
            return token
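
Note the disallowed-element branch above: attributes are alphabetized before disallowed_token() runs, so even tags that get escaped rather than stripped serialize deterministically. A hedged example of that path, assuming bleach 3.x where escaping (strip=False) is the default:

import bleach

# <span> is not in the allowed tags, so it is escaped, not stripped;
# its attributes come out alphabetized
print(bleach.clean('<span b="2" a="1">hi</span>', tags=[]))
# &lt;span a="1" b="2"&gt;hi&lt;/span&gt;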

From the mozilla/bleach repo, bleach/linkifier.py:
                    if attrs is None:
                        # Just add the text
                        new_tokens.append(
                            {'type': 'Characters', 'data': prefix + url + suffix}
                        )

                    else:
                        # Add the "a" tag!
                        if prefix:
                            new_tokens.append(
                                {'type': 'Characters', 'data': prefix}
                            )

                        _text = attrs.pop('_text', '')
                        attrs = alphabetize_attributes(attrs)

                        new_tokens.extend([
                            {'type': 'StartTag', 'name': 'a', 'data': attrs},
                            {'type': 'Characters', 'data': force_unicode(_text)},
                            {'type': 'EndTag', 'name': 'a'},
                        ])

                        if suffix:
                            new_tokens.append(
                                {'type': 'Characters', 'data': suffix}
                            )

                    end = match.end()

                if new_tokens:
                    # Yield the adjusted set of tokens and then continue
                    # through the loop
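
As in the email-handling snippet, '_text' is popped before alphabetizing because it is linkify's pseudo-attribute holding the link text, not a real HTML attribute. A sketch of how a callback sees and extends that same attrs dict (set_target is a hypothetical callback name; note that passing callbacks= replaces the default nofollow callback):

import bleach

def set_target(attrs, new=False):
    # attrs uses the same (namespace, name) keys seen in the snippet above
    attrs[(None, 'target')] = '_blank'
    return attrs

print(bleach.linkify('see http://example.com', callbacks=[set_target]))
# see <a href="http://example.com" target="_blank">http://example.com</a>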