How to use streamlit - common examples

To help you get started, we’ve selected a few streamlit examples based on popular ways the library is used in public projects. Each snippet below is an excerpt from the linked project.


github MarcSkovMadsen / awesome-streamlit / package / awesome_streamlit / testing / components.py View on Github external
def test_run_section(test_items: List[TesTItem]):
    """This section tests each test_item and reports the Test Results and Exception Log

    Arguments:
        test_items {List[TesTItem]} -- The list of TesTItems to be tested
    """
    # Setup Sub Section
    st.subheader("""Run tests""")

    test_runner_progress_bar = st.progress(0)
    test_runner_current_file_url = st.empty()

    st.subheader("Results")
    result_table_section = st.empty()

    st.subheader("Exceptions log")
    result_exception_section = st.empty()
    log = ""

    st.subheader("Screen output")

    test_items_dataframe = services.test_items_dataframe.create_from_test_items(
        test_items
    )
    result_table_section.table(test_items_dataframe)

    test_items_count = len(test_items)
    for index, test_item in enumerate(test_items):
        _progress_section(
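
The excerpt cuts off inside the _progress_section(...) call, but the pattern it demonstrates is complete: create st.progress and st.empty placeholders up front, then overwrite them in place on every loop iteration. A minimal, self-contained sketch of that pattern - the work items and the sleep are stand-ins, not part of the original:

import time

import pandas as pd
import streamlit as st

st.subheader("Run tests")
progress_bar = st.progress(0)   # progress bar, takes values 0-100
current_item = st.empty()       # placeholder overwritten on every iteration

st.subheader("Results")
results_section = st.empty()    # placeholder for the growing results table

items = ["test_a", "test_b", "test_c"]  # stand-in work items
results = []
for index, item in enumerate(items):
    current_item.text(f"Running {item} ...")
    time.sleep(0.5)             # stand-in for the actual test run
    results.append({"test": item, "result": "passed"})
    results_section.table(pd.DataFrame(results))
    progress_bar.progress(int((index + 1) / len(items) * 100))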
github zacheberhart / Learning-to-Feel / src / app.py View on Github external
	track_id = track_id.split('?')[0].split('/track/')[1]
	show_spectros = st.checkbox('Show Spectrograms', value = False)

	# check if a track_id has been entered
	if len(track_id) > 0:
	
		# get track from Spotify API
		track = get_spotify_track(track_id)
		st.subheader('Track Summary')
		st.table(get_track_summmary_df(track))

		# check if there is track preview available from Spotify
		if track['preview_url']:

			# display 30 second track preview
			st.subheader('Track Preview (What the Algorithm "Hears")')
			st.write('')
			preview = get_track_preview(track_id)
			st.audio(preview)

			# get top and bottom labels for the track
			st.subheader('Track Analysis')
			track_df = deepcopy(DF[DF.track_id == track_id].reset_index(drop = True))

			# return values from db if already classified, otherwise classify
			if len(track_df) > 0:
				track_df = deepcopy(track_df.iloc[:,5:].T.rename(columns = {0: 'score'}).sort_values('score', ascending = False))
				st.table(pd.DataFrame({'Top 5': track_df[:5].index.tolist(), 'Bottom 5': track_df[-5:].index.tolist()}))
				if show_spectros: generate_spectros(preview)
			else:
				generate_spectros(preview)
				track_df = get_predictions()
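
This snippet leans on helpers defined elsewhere in the repo (get_spotify_track, get_track_preview, get_track_summmary_df, generate_spectros and the DF dataframe). A stripped-down sketch of the same widget flow, with the Spotify calls replaced by clearly marked stand-ins:

import pandas as pd
import streamlit as st

track_url = st.text_input("Spotify track URL")  # e.g. https://open.spotify.com/track/<id>
show_spectros = st.checkbox("Show Spectrograms", value=False)

if "/track/" in track_url:
    # same parsing as above: drop the query string, keep the id
    track_id = track_url.split("?")[0].split("/track/")[1]

    st.subheader("Track Summary")
    st.table(pd.DataFrame({"field": ["track_id"], "value": [track_id]}))  # stand-in summary

    preview_url = None  # stand-in for track["preview_url"] from the Spotify API
    if preview_url:
        st.subheader('Track Preview (What the Algorithm "Hears")')
        st.audio(preview_url)  # st.audio accepts a URL, a file, or raw bytes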
github IliaLarchenko / albumentations-demo / src / utils.py View on Github external
def show_random_params(data: dict, interface_type: str = "Professional"):
    """Shows random params used for transformation (from A.ReplayCompose)"""
    if interface_type == "Professional":
        st.subheader("Random params used")
        random_values = {}
        for applied_params in data["replay"]["transforms"]:
            random_values[
                applied_params["__class_fullname__"].split(".")[-1]
            ] = applied_params["params"]
        st.write(random_values)
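
st.write does the heavy lifting here: handing it a plain dict renders the whole nested structure in the app. A usage sketch, assuming show_random_params above is in scope; the dict is hand-built to mimic the replay structure A.ReplayCompose returns:

data = {
    "replay": {
        "transforms": [
            {
                "__class_fullname__": "albumentations.HorizontalFlip",
                "params": {"applied": True},
            },
        ]
    }
}
show_random_params(data, interface_type="Professional")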
github streamlit / streamlit / examples / mnist-cnn.py View on Github external
model = Sequential()
layer_1_size = 10
epochs = 3

model.add(Conv2D(10, (5, 5), input_shape=(img_width, img_height, 1), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Conv2D(config.layer_2_size, (5, 5), input_shape=(img_width, img_height,1), activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(8, activation="relu"))
model.add(Dense(num_classes, activation="softmax"))

model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])

show_terminal_output = not config.get_option("server.liveSave")
model.fit(
    x_train,
    y_train,
    validation_data=(x_test, y_test),
    epochs=epochs,
    verbose=show_terminal_output,
    callbacks=[MyCallback(x_test)],
)

st.success("Finished training!")
github awarebayes / RecNN / examples / streamlit_demo.py View on Github external
    if page == "🔨 Test Recommendation":

        st.header("Test the Recommendations")

        st.info("Upon the first opening the data will start loading."
                "\n Unfortunately there is no progress verbose in streamlit. Look in your console.")

        st.success('Data is loaded!')

        models = load_models(device)
        st.success('Models are loaded!')

        state, action, reward, next_state, done = get_batch(device)

        st.subheader('Here is a random batch sampled from testing environment:')
        if st.checkbox('Print batch info'):
            st.subheader('State')
            st.write(state)
            st.subheader('Action')
            st.write(action)
            st.subheader('Reward')
            st.write(reward.squeeze())

        st.subheader('(Optional) Select the state you are getting the recommendations for')

        action_id = np.random.randint(0, state.size(0), 1)[0]
        action_id_manual = st.checkbox('Manually set state index')
        if action_id_manual:
            action_id = st.slider("Choose state index:", min_value=0, max_value=state.size(0))

        st.write('state:', state[action_id])
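
Two widget patterns carry this page: st.checkbox gates the verbose batch dump, and a second checkbox lets the user replace a random index with an st.slider choice. A self-contained sketch with a NumPy array standing in for the torch batch:

import numpy as np
import streamlit as st

state = np.random.rand(32, 8)  # stand-in for the torch state batch

st.subheader("Here is a random batch sampled from the testing environment:")
if st.checkbox("Print batch info"):
    st.subheader("State")
    st.write(state)

action_id = np.random.randint(0, state.shape[0])
if st.checkbox("Manually set state index"):
    action_id = st.slider("Choose state index:", min_value=0,
                          max_value=state.shape[0] - 1)

st.write("state:", state[action_id])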
github MarcSkovMadsen / awesome-streamlit / gallery / medical_language_learner / medical_language_learner.py View on Github external
        data_ = dimred.fit_transform(tfidf)
        data__ = pd.DataFrame(data=data_,columns=["principal component 1","principal component 2"])
        labels_ = pd.DataFrame(data=enc.inverse_transform(labels),columns=["class"])
        data___ = pd.concat((data__,labels_),axis=1)
        c = (alt.Chart(data___, title="dimensionality reduction", height=600)
             .mark_circle(size=20)
             .encode(x='principal component 1', y='principal component 2',
                     color=alt.Color('class', scale=alt.Scale(scheme='blues')),
                     tooltip=["class"])
             .interactive())
        st.altair_chart(c)
        st.write("The explained variance is",np.round(np.sum(dimred.explained_variance_ratio_)*100,2),"%.")

    # MODEL BUILDING.
    st.header("Model Building")
    st.write("The model is based on a **random forest**. Customize the model in the sidebar.")
    st.sidebar.header("Customizing the model.")
    n_estimators = st.sidebar.text_input('Number of trees in random forest.', '1000')
    max_leaf_nodes = st.sidebar.text_input('Maximum number of leaf nodes.', '25')
    max_depth = st.sidebar.text_input('Maximum depth.', '5')
    class_weight = st.sidebar.selectbox("Class weights for the model.",('balanced','balanced_subsample'))
    forest_clf = RandomForestClassifier(n_estimators=int(n_estimators),max_depth=int(max_depth),max_leaf_nodes=int(max_leaf_nodes),class_weight=class_weight,oob_score=True,n_jobs=-1,random_state=0) # Define classifier to optimize.
    #parameters = {'max_leaf_nodes':np.linspace(20,35,14,dtype='int')} # Define grid.
    #clf = RandomizedSearchCV(forest_clf, parameters, n_iter=10, cv=3,iid=False, scoring='accuracy',n_jobs=-1) # Balanced accuracy as performance measure.

    #@st.cache(show_spinner=False)
    def train():
        classifier = forest_clf.fit(tfidf, labels) # Train/optimize classifier.
        #forest = classifier.best_estimator_
        feature_importances = classifier.feature_importances_
        indices = np.argsort(feature_importances)[::-1]

        # Analyze Feature Importance.
        n_f = 30 # Amount of Desired Features.
        sorted_feature_names = []
        for f in range(n_f):
            sorted_feature_names.append(feature_names[indices[f]])
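
The wiring pattern is worth isolating: sidebar text inputs return strings, so the snippet casts them with int() before handing them to the classifier. A compact sketch of that sidebar-to-model flow, using a synthetic dataset as a stand-in for the tfidf features:

import streamlit as st
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

st.sidebar.header("Customizing the model.")
n_estimators = st.sidebar.text_input("Number of trees in random forest.", "100")
max_depth = st.sidebar.text_input("Maximum depth.", "5")
class_weight = st.sidebar.selectbox("Class weights for the model.",
                                    ("balanced", "balanced_subsample"))

X, y = make_classification(n_samples=200, n_features=10, random_state=0)  # stand-in data

clf = RandomForestClassifier(
    n_estimators=int(n_estimators),  # text_input returns a string, hence int()
    max_depth=int(max_depth),
    class_weight=class_weight,
    random_state=0,
)
clf.fit(X, y)
st.write("Training accuracy:", clf.score(X, y))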
github awarebayes / RecNN / examples / streamlit_demo.py View on Github external
**= MODELSPATH**
            - [Data for Streamlit Demo](https://drive.google.com/file/d/1nuhHDdC4mCmiB7g0fmwUSOh1jEUQyWuz/view?usp=sharing)
             **= DATAPATH**
            - [ML20M Dataset](https://grouplens.org/datasets/movielens/20m/)
             **= ML20MPATH**
             
            p.s. ml20m is only needed for links.csv, I couldn't include it in my streamlit data because of copyright.
            This is all the data you need.
            """
        )
github streamlit / streamlit / examples / reference.py View on Github external
st.image(channels, caption=["Red", "Green", "Blue"], width=200)

st.header("Visualizing data as images via OpenCV")

st.write("Streamlit also supports OpenCV!")
try:
    import cv2

    if image_bytes is not None:
        with st.echo():
            image = cv2.cvtColor(
                cv2.imdecode(np.fromstring(image_bytes, dtype="uint8"), 1),
                cv2.COLOR_BGR2RGB,
            )

            st.image(image, caption="Sunset", use_column_width=True)
            st.image(cv2.split(image), caption=["Red", "Green", "Blue"], width=200)
except ImportError as e:
    st.write(
        "If you install opencv with the command `pip install opencv-python-headless` "
        "this section will tell you how to use it."
    )

    st.warning("Error running opencv: " + str(e))

st.header("Inserting headers")

st.write(
    "To insert titles and headers like the ones on this page, use the `title`, "
    "`header`, and `subheader` functions."
)
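
st.echo deserves a note: everything inside the with block is executed normally and also rendered as source code in the app, which is how this reference page documents itself. A minimal illustration that needs no uploaded image:

import numpy as np
import streamlit as st

with st.echo():
    # this block runs and is simultaneously shown as code in the app
    image = np.zeros((64, 64, 3), dtype="uint8")
    image[:, :, 0] = 255  # a solid red square (RGB channel order)
    st.image(image, caption="Red square", width=200)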
github leenamurgai / debias-ml / source / analysis_oversampling.py View on Github external
y_pred = train_predict_new(clf_nn, X_train_new, y_train_new,
                           X_test_new, y_test_new, results_df, 0)
plot_distributions(y_pred, Z_test_new, target_feature, sensitive_features,
                   bias_cols, categories, 0, results_df, 'fair-data')

################################################################################

st.write('')
st.subheader('3.4 ...after oversampling by different amounts')
st.write('')

results_df = pd.DataFrame()

#for factor in np.linspace(0.0, 5.0, num=11):
for factor in range(1, 11):
    st.write('**Oversample factor:**', factor)
    # Oversampling to address bias in the training dataset
    X_new, y_new, Z_new = oversampler.get_oversampled_data(factor)
    # Shuffle the data after oversampling
    X_train_new, y_train_new, Z_train_new = shuffle(X_new, y_new, Z_new,
                                                    random_state=0)
    # initialise NeuralNet Classifier
    clf_nn = nn_classifier(n_features=X_train_new.shape[1])
    # make predictions on the test set
    y_pred = train_predict_new(clf_nn, X_train_new, y_train_new,
                               X_test, y_test, results_df, factor)
    plot_distributions(y_pred, Z_test, target_feature, sensitive_features,
                       bias_cols, categories, factor, results_df,
                       'fair-algo-'+str(factor))

st.table(results_df)
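
The reporting pattern in this loop - write a line per oversampling factor, collect one summary row per run, and render everything at the end with st.table - can be sketched without the training code. The accuracy values below are placeholders, not results:

import pandas as pd
import streamlit as st

rows = []
for factor in range(1, 11):
    st.write("**Oversample factor:**", factor)
    # stand-in for train_predict_new: record one summary row per factor
    rows.append({"factor": factor, "accuracy": 0.80 + 0.01 * factor})  # placeholder numbers

st.table(pd.DataFrame(rows).set_index("factor"))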