How to use the `streamlit.cache` function in Streamlit

To help you get started, we’ve selected a few Streamlit examples, based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github streamlit / streamlit / examples / uber.py View on Github external
@st.cache(persist=True)
def load_data(nrows):
    """Load the first `nrows` rows of the dataset and prepare it for plotting.

    Arguments:
        nrows {int} -- number of CSV rows to read from DATA_URL

    Returns:
        pandas.DataFrame -- data with lowercase column names and the
        DATE_TIME column parsed as datetimes
    """
    data = pd.read_csv(DATA_URL, nrows=nrows)
    # Normalize every column label to a lowercase string (str() guards
    # against non-string labels); avoids the PEP 8 E731 named lambda.
    data.columns = [str(col).lower() for col in data.columns]
    data[DATE_TIME] = pd.to_datetime(data[DATE_TIME])
    return data
github streamlit / streamlit / docs / api-examples-source / charts.video.py View on Github external
@st.cache
def read_file_from_url(url):
    """Fetch `url` over HTTP and return the raw response body as bytes (cached)."""
    response = requests.get(url)
    return response.content
github MarcSkovMadsen / awesome-streamlit / gallery / self_driving_cars / self_driving_cars.py View on Github external
    @st.cache(allow_output_mutation=True)
    def load_network(config_path, weights_path):
        """Load a Darknet/YOLO network and the names of its unconnected output layers.

        Arguments:
            config_path {str} -- path to the .cfg network description
            weights_path {str} -- path to the .weights file

        Returns:
            tuple -- (cv2.dnn Net, list of output layer names)
        """
        net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
        layer_names = net.getLayerNames()
        # getUnconnectedOutLayers() returns an Nx1 array in older OpenCV builds
        # but a flat 1-D array since OpenCV 4.5.4, where the original `i[0]`
        # indexing raised "invalid index to scalar variable". flatten() yields
        # plain scalars in both layouts; indices are 1-based, hence the -1.
        out_indices = net.getUnconnectedOutLayers()
        output_layer_names = [layer_names[int(i) - 1] for i in out_indices.flatten()]
        return net, output_layer_names
    net, output_layer_names = load_network("yolov3.cfg", "yolov3.weights")
github MarcSkovMadsen / awesome-streamlit / gallery / ml_app_registry / app.py View on Github external
@st.cache
def get_sentiment_analyzer() -> SentimentIntensityAnalyzer:
    """Build and cache a single SentimentIntensityAnalyzer.

    Returns:
        SentimentIntensityAnalyzer -- a ready-to-use analyzer instance
    """
    # Construction is done once; st.cache serves the same object afterwards.
    analyzer = SentimentIntensityAnalyzer()
    return analyzer
github virusvn / streamlit-components-demo / app.py View on Github external
@st.cache
def get_file_content_as_string(url: str):
    """Download `url` and return its body decoded as UTF-8 text (cached).

    Arguments:
        url {str} -- the URL to fetch

    Returns:
        str -- the response body decoded as UTF-8
    """
    # The original never closed the HTTP response; the context manager
    # guarantees the connection is released even if read() raises.
    with urllib.request.urlopen(url) as response:
        raw = response.read()
    return raw.decode("utf-8")
github joelgrus / streamlit-allennlp / app.py View on Github external
import streamlit as st
from allennlp import pretrained
import matplotlib.pyplot as plt
import numpy as np

st.header("AllenNLP Demo")

# Load the pretrained BiDAF model for question answering.
# (It's big, don't do this over dial-up.)
# Use st.cache so that it doesn't reload when you change the inputs.
# NOTE(review): `ignore_hash` was deprecated in later Streamlit releases in
# favor of `allow_output_mutation` — confirm against the installed version.
predictor = st.cache(
       pretrained.bidirectional_attention_flow_seo_2017,
       ignore_hash=True  # the Predictor is not hashable
)()

# Create a text area to input the passage.
passage = st.text_area("passage", "The Matrix is a 1999 movie starring Keanu Reeves.")

# Create a text input to input the question.
question = st.text_input("question", "When did the Matrix come out?")

# Use the predictor to find the answer.
# NOTE(review): argument order here is (question, passage) — presumably the
# BiDAF predictor's positional contract; verify against the AllenNLP API.
result = predictor.predict(question, passage)

# From the result, we want "best_span", "question_tokens", and "passage_tokens"
# (the passage_tokens extraction appears further down, outside this excerpt).
start, end = result["best_span"]
question_tokens = result["question_tokens"]
github streamlit / streamlit / examples / reference.py View on Github external
@st.cache(persist=True)
def read_file_from_url(url):
    """Fetch `url` and return the raw bytes, or None on failure.

    Network-level problems are reported with st.error; any other error is
    surfaced via st.exception. Results are persisted to disk by st.cache.
    """
    try:
        return requests.get(url).content
    except requests.exceptions.RequestException:
        st.error("Unable to load file from %s. Is the internet connected?" % url)
        return None
    except Exception as e:
        st.exception(e)
        return None
github MarcSkovMadsen / awesome-streamlit / gallery / ml_app_registry / app.py View on Github external
@st.cache
def get_pickle(file: str):
    """Download `file` from the project's GitHub root and unpickle it (cached).

    NOTE(security): pickle.load on bytes fetched over the network can execute
    arbitrary code from that source — acceptable here only because the URL is
    pinned to the project's own repository via GITHUB_ROOT.
    """
    source_url = GITHUB_ROOT + file
    with urllib.request.urlopen(source_url) as response:  # type: ignore
        return pickle.load(response)
github MarcSkovMadsen / awesome-streamlit / package / awesome_streamlit / core / services / other.py View on Github external
@st.cache
def get_file_content_as_string(url: str) -> str:
    """The url content as a string

    Arguments:
        url {str} -- The url to request

    Returns:
        str -- The text of the url
    """
    # Load local if possible
    if url.startswith(GITHUB_RAW_URL):
        path = pathlib.Path.cwd() / url.replace(GITHUB_RAW_URL, "")
        if path.exists():
            with open(path, encoding="utf8") as file:
                content = file.read()
            return content