How to use the streamlit.info function in streamlit

To help you get started, we’ve selected a few streamlit examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github awarebayes / RecNN / examples / streamlit_demo.py View on Github external
**= MODELSPATH**
            - [Data for Streamlit Demo](https://drive.google.com/file/d/1nuhHDdC4mCmiB7g0fmwUSOh1jEUQyWuz/view?usp=sharing)
             **= DATAPATH**
            - [ML20M Dataset](https://grouplens.org/datasets/movielens/20m/)
             **= ML20MPATH**
             
            p.s. ml20m is only needed for links.csv, I couldn't include it in my streamlit data because of copyright.
            This is all the data you need.
            """
        )

    if page == "🔨 Test Recommendation":
        # Page: interactively test the recommender on a random batch from
        # the test environment.

        st.header("Test the Recommendations")

        st.info("Upon the first opening the data will start loading."
                "\n Unfortunately there is no progress verbose in streamlit. Look in your console.")

        st.success('Data is loaded!')

        # load_models / get_batch are defined elsewhere in this file;
        # presumably cached across reruns — confirm upstream.
        models = load_models(device)
        st.success('Models are loaded!')

        # Standard RL transition tuple sampled from the test environment.
        state, action, reward, next_state, done = get_batch(device)

        st.subheader('Here is a random batch sampled from testing environment:')
        if st.checkbox('Print batch info'):
            st.subheader('State')
            st.write(state)
            st.subheader('Action')
            st.write(action)
            st.subheader('Reward')
github MarcSkovMadsen / awesome-streamlit / src / pages / resources.py View on Github external
def write():
    """Render the Resources page.

    Builds tag and author filters, shows the filtered resource list as
    markdown, and optionally dumps the raw resource JSON in the sidebar.
    """
    ast.shared.components.title_awesome("Resources")
    st.sidebar.title("Resources")
    tags = ast.shared.components.multiselect(
        "Select Tag(s)", options=ast.database.TAGS, default=[]
    )

    # A synthetic "All" author acts as the no-filter sentinel; it is mapped
    # to None so downstream filtering treats it as "any author".
    author_all = ast.shared.models.Author(name="All", url="")
    author = st.selectbox("Select Author", options=[author_all] + ast.database.AUTHORS)
    if author == author_all:
        author = None
    show_awesome_resources_only = st.checkbox("Show Awesome Resources Only", value=True)
    if not tags:
        st.info(
            """Please note that **we list each resource under a most important tag only!**"""
        )
    # Placeholder so the resource list can be filled in after the spinner.
    resource_section = st.empty()

    with st.spinner("Loading resources ..."):
        markdown = resources.get_resources_markdown(
            tags, author, show_awesome_resources_only
        )
        resource_section.markdown(markdown)

    if st.sidebar.checkbox("Show Resource JSON"):
        st.subheader("Source JSON")
        st.write(ast.database.RESOURCES)
    # NOTE: the original ended with a dead store `tags = None` (the local is
    # never read again before the function returns); removed.
github MarcSkovMadsen / awesome-streamlit / scratchpad / experiments.py View on Github external
def card(header, body):
    """Render *body* as a paragraph inside an HTML card titled *header*."""
    markup = card_begin_str(header)
    markup += f"<p>{body}</p>"
    markup += card_end_str()
    html(markup)


def br(n):
    """Insert *n* HTML line breaks into the app."""
    html("<br>" * n)


# Demo: a plain-text body can be wrapped in a card via the helper above.
card("This works", "I can insert text inside a card")

br(2)

# Counter-example: per the strings below, a Streamlit element (st.info)
# cannot be captured between raw-HTML card fragments — it renders outside
# the card markup.
html(card_begin_str("This does not work"))
st.info("I cannot insert an st.info element inside a card")
html(card_end_str())
github MarcSkovMadsen / awesome-streamlit / gallery / kickstarter_dashboard / kickstarter_dashboard.py View on Github external
def main():
    """A Reactive View of the KickstarterDashboard"""
    # Build the dashboard object from the raw Kickstarter dataframe and show
    # the module docstring plus an intro info box.
    kickstarter_df = get_kickstarter_df()
    kickstarter_dashboard = KickstarterDashboard(kickstarter_df=kickstarter_df)
    st.markdown(__doc__)
    st.info(INFO)

    options = get_categories()
    categories_selected = st.multiselect("Select Categories", options=options)
    # An empty selection means "all categories": clear any previously set
    # category filter instead of filtering down to nothing.
    if not categories_selected and kickstarter_dashboard.categories:
        kickstarter_dashboard.categories = []
    else:
        kickstarter_dashboard.categories = categories_selected

    # Sidebar range filters: creation year and pledged amount (slider units
    # presumably millions of USD — confirm against the dashboard class).
    st.sidebar.title("Selections")
    x_range = st.sidebar.slider("Select create_at range", 2009, 2018, (2009, 2018))
    y_range = st.sidebar.slider("Select usd_pledged", 0.0, 5.0, (0.0, 5.0))
    filter_df = KickstarterDashboard.filter_on_categories(kickstarter_df, categories_selected)
    # Widen the year range to full calendar years (Jan 1 .. Dec 31).
    filter_df = kickstarter_dashboard.filter_on_ranges(
        filter_df, (pd.Timestamp(x_range[0], 1, 1), pd.Timestamp(x_range[1], 12, 31)), y_range
    )
    kickstarter_dashboard.scatter_df = filter_df
github streamlit / streamlit / e2e / scripts / info.py View on Github external
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import streamlit as st

# Minimal e2e fixture: render a single st.info message so the test suite can
# verify how info elements are displayed.
st.info("This info message is awesome!")
github ICLRandD / Blackstone / black_lit.py View on Github external
nlp = spacy.load('en_blackstone_proto')
    print ("model loaded!")
    return nlp(text)


# Sidebar header and description for the visualizer.
st.sidebar.title("Interactive spaCy visualizer")
st.sidebar.markdown(
    """
Process text with [spaCy](https://spacy.io) models and visualize named entities,
dependencies and more. Uses spaCy's built-in
[displaCy](http://spacy.io/usage/visualizers) visualizer under the hood.
"""
)


# Temporary info box shown while the model loads; emptied once done.
# NOTE(review): spacy_model is referenced here but defined elsewhere in the
# file (likely a sidebar selectbox); the hard-coded load below ignores it —
# confirm that is intentional.
model_load_state = st.info(f"Loading model '{spacy_model}'...")
nlp = spacy.load('en_blackstone_proto')
model_load_state.empty()

# Analyze user-supplied text with the selected pipeline.
text = st.text_area("Text to analyze", DEFAULT_TEXT)
doc = process_text(spacy_model, text)

if "parser" in nlp.pipe_names:
    st.header("Dependency Parse & Part-of-speech tags")
    st.sidebar.header("Dependency Parse")
    split_sents = st.sidebar.checkbox("Split sentences", value=True)
    collapse_punct = st.sidebar.checkbox("Collapse punctuation", value=True)
    collapse_phrases = st.sidebar.checkbox("Collapse phrases")
    compact = st.sidebar.checkbox("Compact mode")
    options = {
        "collapse_punct": collapse_punct,
        "collapse_phrases": collapse_phrases,
github MarcSkovMadsen / awesome-streamlit / gallery / nba_roster_turnover / roster_turnover.py View on Github external
def main():
    """Render the NBA Roster Turnover vs Wins dashboard page."""
    st.title("NBA Roster Turnover vs Wins")
    st.header("Summary")
    st.info(
        """
**Roster turnover** is defined as the sum of the absolute difference between minutes played by each
player from year to year. There is a **significant negative correlation** with higher turnover and
regular season wins."""
    )
    # Links to the raw source data hosted on GitHub.
    st.markdown(
        f"""
    Source Data: [Player Minutes]({PLAYER_MINUTES_GITHUB}), [Roster Turnover]({ROSTER_TURNOVER_GITHUB}),
    [Teams Data]({TEAMS_DATA_GITHUB})
        """
    )

    # Loading data
    with st.spinner("Loading data ..."):
        image = get_image()
        # deep copy so mutations below don't leak into the cached dataframe
        player_minutes = load_player_minutes().copy(deep=True)
github Jcharis / Streamlit_DataScience_Apps / news_classifier_nlp-app / app.py View on Github external
prediction = predictor.predict(vect_text)
				# st.write(prediction)
			elif model_choice == 'NB':
				predictor = load_prediction_models("models/newsclassifier_NB_model.pkl")
				prediction = predictor.predict(vect_text)
				# st.write(prediction)
			elif model_choice == 'DECISION_TREE':
				predictor = load_prediction_models("models/newsclassifier_CART_model.pkl")
				prediction = predictor.predict(vect_text)
				# st.write(prediction)

			final_result = get_key(prediction,prediction_labels)
			st.success("News Categorized as:: {}".format(final_result))

	if choice == 'NLP':
		st.info("Natural Language Processing of Text")
		raw_text = st.text_area("Enter News Here","Type Here")
		nlp_task = ["Tokenization","Lemmatization","NER","POS Tags"]
		task_choice = st.selectbox("Choose NLP Task",nlp_task)
		if st.button("Analyze"):
			st.info("Original Text::\n{}".format(raw_text))

			docx = nlp(raw_text)
			if task_choice == 'Tokenization':
				result = [token.text for token in docx ]
			elif task_choice == 'Lemmatization':
				result = ["'Token':{},'Lemma':{}".format(token.text,token.lemma_) for token in docx]
			elif task_choice == 'NER':
				result = [(entity.text,entity.label_)for entity in docx.ents]
			elif task_choice == 'POS Tags':
				result = ["'Token':{},'POS':{},'Dependency':{}".format(word.text,word.tag_,word.dep_) for word in docx]
github ICLRandD / Blackstone / blackstream.py View on Github external
nlp = load_model(model_name)
    print ("model loaded!")
    return nlp(text)


# Sidebar header and description for the visualizer.
st.sidebar.title("Interactive spaCy visualizer")
st.sidebar.markdown(
    """
Process text with [spaCy](https://spacy.io) models and visualize named entities,
dependencies and more. Uses spaCy's built-in
[displaCy](http://spacy.io/usage/visualizers) visualizer under the hood.
"""
)

# Let the user pick a model; show a temporary info box while it loads,
# then clear the placeholder once the model is ready.
spacy_model = st.sidebar.selectbox("Model name", SPACY_MODEL_NAMES)
model_load_state = st.info(f"Loading model '{spacy_model}'...")
nlp = load_model(spacy_model)
model_load_state.empty()

# Analyze user-supplied text with the selected pipeline.
text = st.text_area("Text to analyze", DEFAULT_TEXT)
doc = process_text(spacy_model, text)

if "parser" in nlp.pipe_names:
    st.header("Dependency Parse & Part-of-speech tags")
    st.sidebar.header("Dependency Parse")
    split_sents = st.sidebar.checkbox("Split sentences", value=True)
    collapse_punct = st.sidebar.checkbox("Collapse punctuation", value=True)
    collapse_phrases = st.sidebar.checkbox("Collapse phrases")
    compact = st.sidebar.checkbox("Compact mode")
    options = {
        "collapse_punct": collapse_punct,
        "collapse_phrases": collapse_phrases,
github explosion / spaCy / examples / streamlit_spacy.py View on Github external
def process_text(model_name, text):
    """Load the spaCy pipeline *model_name* and return the processed Doc for *text*."""
    pipeline = load_model(model_name)
    return pipeline(text)


# Sidebar header and description for the visualizer.
st.sidebar.title("Interactive spaCy visualizer")
st.sidebar.markdown(
    """
Process text with [spaCy](https://spacy.io) models and visualize named entities,
dependencies and more. Uses spaCy's built-in
[displaCy](http://spacy.io/usage/visualizers) visualizer under the hood.
"""
)

# Let the user pick a model; show a temporary info box while it loads,
# then clear the placeholder once the model is ready.
spacy_model = st.sidebar.selectbox("Model name", SPACY_MODEL_NAMES)
model_load_state = st.info(f"Loading model '{spacy_model}'...")
nlp = load_model(spacy_model)
model_load_state.empty()

# Analyze user-supplied text with the selected pipeline.
text = st.text_area("Text to analyze", DEFAULT_TEXT)
doc = process_text(spacy_model, text)

if "parser" in nlp.pipe_names:
    st.header("Dependency Parse & Part-of-speech tags")
    st.sidebar.header("Dependency Parse")
    split_sents = st.sidebar.checkbox("Split sentences", value=True)
    collapse_punct = st.sidebar.checkbox("Collapse punctuation", value=True)
    collapse_phrases = st.sidebar.checkbox("Collapse phrases")
    compact = st.sidebar.checkbox("Compact mode")
    options = {
        "collapse_punct": collapse_punct,
        "collapse_phrases": collapse_phrases,