How to use the markdown.markdown function in Markdown

To help you get started, we’ve selected a few markdown.markdown examples based on popular ways the function is used in public projects.
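
Before working through the project excerpts below, here is a minimal, self-contained sketch of the call they all build on. The input strings and the extension choice are illustrative, not taken from any of the projects:

from markdown import markdown

# Convert a Markdown string into an HTML fragment.
html = markdown("# Hello\n\nSome *emphasis* and a [link](https://example.com/).")

# Extensions are opted into by name; several excerpts below enable
# fenced code blocks this way.
html = markdown("```python\nprint('hi')\n```",
                extensions=["markdown.extensions.fenced_code"])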

github vulndb / data / tests / test_valid_markdown.py
def test_valid_markdown(self):
        invalid = []

        for language, _file, db_data in self.get_all_json():
            description = self.get_description(language, db_data['description']['$ref'])
            try:
                markdown(description)
            except Exception:
                invalid.append(_file)

            guidance = self.get_fix(language, db_data['fix']['guidance']['$ref'])
            try:
                markdown(guidance)
            except Exception:
                invalid.append(_file)

        self.assertEqual(invalid, [])
github dataiap / dataiap / resources / hacco / hacco.py
raise TypeError("Missing the required 'outdir' keyword argument.")
    language = get_language(source)

    output = pygments.highlight(language["divider_text"].join(section["code_text"] for section in sections),
                                language["lexer"],
                                formatters.get_formatter_by_name("html"))

    output = output.replace(highlight_start, "").replace(highlight_end, "")
    fragments = re.split(language["divider_html"], output)
    for i, section in enumerate(sections):
        section["code_html"] = highlight_start + shift(fragments, "") + highlight_end
        try:
            docs_text = unicode(section["docs_text"])
        except UnicodeError:
            docs_text = unicode(section["docs_text"].decode('utf-8'))
        dh = markdown(preprocess(docs_text,
                                 i,
                                 preserve_paths=preserve_paths,
                                 outdir=outdir))
        section["docs_html"] = [{'text':dh}] if dh != '' else []
        section["num"] = i
github maciejczyzewski / hyhyhy / src / 3.x / collector.py
def parse(self, content=[], html='', id=''):
        for file in config.sections:
            print (prf('OK'), 'Parsing file', file, '...')

            id = file.split('.')[-2].split('/')[-1]

            if file.split('.')[-1] == 'md':
                html = markdown.markdown(open(file, 'r').read(), extensions=['markdown.extensions.fenced_code'])
            elif file.split('.')[-1] == 'html':
                html = open(file, 'r').read()

            if config.settings.has_option('sections', id):
                content.append([html, config.settings.get('sections', id)])
            else:
                content.append([html, ' '])

        return content
github Nexedi / erp5 / product / PortalTransforms / transforms / markdown_to_html.py
def convert(self, orig, data, **kwargs):
        if HAS_MARKDOWN:
            # markdown expects unicode input:
            orig = unicode(orig.decode('utf-8'))
            # PortalTransforms, however expects a string as result,
            # so we encode the unicode result back to UTF8:
            html = markdown_transformer.markdown(orig).encode('utf-8')
        else:
            html = orig
        data.setData(html)
        return data
github bendangnuksung / bota / web_scrap / reddit_process.py
def markdown_to_text(markdown_string):
    """ Converts a markdown string to plaintext """
    # md -> html -> text since BeautifulSoup can extract text cleanly
    html = markdown(markdown_string)
    # remove code snippets
    html = re.sub(r'<pre>(.*?)</pre>', ' ', html)
    html = re.sub(r'<code>(.*?)</code>', ' ', html)
    # extract text
    soup = BeautifulSoup(html, "html.parser")
    text = ''.join(soup.findAll(text=True))
    return text
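
A quick, made-up call to the helper above (the input and the result are illustrative only):

sample = "## Heading\n\nSome *emphasised* text and `inline code`."
print(markdown_to_text(sample))
# Tags are stripped and the <code> span dropped, leaving roughly:
# "Heading Some emphasised text and ."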
github anotherjesse / firenomics / service / vendor / web / utils.py
def safemarkdown(text):
    """
    Converts text to HTML following the rules of Markdown, but blocking any
    outside HTML input, so that only the things supported by Markdown
    can be used. Also converts raw URLs to links.

    (requires [markdown.py](http://webpy.org/markdown.py))
    """
    from markdown import markdown
    if text:
        text = text.replace('<', '&lt;')
        # TODO: automatically get page title?
        text = r_url.sub(r'<\1>', text)
        text = markdown(text)
        return text
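
To see the escaping step above at work, a made-up call (r_url is the module-level URL regex this function relies on):

print(safemarkdown("**bold** and <script>alert(1)</script>"))
# Because '<' is escaped before markdown() runs, the script tag comes out
# as literal text inside the generated <p>...</p> rather than as live HTML.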
github andresriancho / w3af / w3af / core / ui / gui / scanrun.py
return

        instance = self.get_instance(path)
        if not isinstance(instance, Info):
            return
        
        summary = instance.get_desc()
        self.kbbrowser.explanation.set_text(summary)
        self.kbbrowser.vuln_notebook.set_current_page(0)

        if instance.has_db_details():
            desc_markdown = instance.get_long_description()
            desc_markdown += '\n\n### Fix guidance\n'
            desc_markdown += instance.get_fix_guidance()
            desc_markdown += self._create_reference_list(instance)
            desc = markdown(desc_markdown)

            self.kbbrowser.description.load_html_string(desc, FILE)
        else:
            self.kbbrowser.description.load_html_string(DB_VULN_NOT_FOUND, FILE)

        if not instance.get_id():
            self.clear_request_response_viewer()
            return

        #
        # We have two different cases:
        #
        # 1) The object is related to ONLY ONE request / response
        # 2) The object is related to MORE THAN ONE request / response
        #
        # For 1), we show the classic view, and for 2) we show the classic
github pschwede / AnchorBot / web.py
link = None
            try:
                link = DEHASHED[hashed]
            except KeyError:
                for article in b.database["articles"]:
                    if hashed == hash(article):
                        link = article
                        break
            if link:
                b.update_article(link, read=True)

                article = dict(b.database["articles"][link])
                article['source'] = __get_source_domain(link)
                article['date'] = time.ctime(article['release'])

                original_content = markdown.markdown(escape(article['content']))
                spaned_content = []
                for paragraph in [p for p in RE_PARAGRAPHS.findall(original_content) if p]:
                    sentences = [s for s in RE_SENTENCES.findall(paragraph) if s]
                    if not sentences:
                        continue
                    elif len(sentences) == 1:
                        spaned_content.append("<p><span>%s</span></p>" % sentences[0])
                    else:
                        spaned_content.append(
                                "<p>%s</p>" % \
                                ("<span>%s</span>"*3 % \
                                (sentences[0], "".join(sentences[1:-2]), sentences[-1]))
                                )
                article['spaned_content'] = " ".join(spaned_content)
                if keyword:
                    article['spaned_content'] = re_sub(r"(%s)" % keyword,
github kiwicom / the-zoo / zoo / auditing / models.py
def description_html(self):
        return markdown.markdown(
            self.description_md,
            extensions=[
                "markdown.extensions.fenced_code",
                "markdown.extensions.smarty",
            ],
        )
github sqlalchemy / sqlalchemy / doc / build / read_markdown.py
def parse_markdown_files(toc, files):
    for inname in files:
        infile = 'content/%s.txt' % inname
        if not os.access(infile, os.F_OK):
            continue
        html = markdown.markdown(file(infile).read())
        tree = et.fromstring("<html>" + html + "</html>")
        (title, toc_element) = create_toc(inname, tree, toc)
        safety_code(tree)
        replace_pre_with_mako(tree)
        process_rel_href(tree)
        outname = 'output/%s.html' % inname
        print infile, '->', outname
        outfile = utf8stream(file(outname, 'w'))
        outfile.write(header(toc, title, inname))
        dump_tree(tree, outfile)