# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_run_section(test_items: List[TesTItem]):
"""Run each TesTItem and render a progress bar, a results table and an exception log.

Arguments:
    test_items {List[TesTItem]} -- the test items to execute
"""
# NOTE(review): indentation appears to have been stripped from this paste —
# the body lines sit at column 0, which is not valid Python. Code is kept
# byte-for-byte; reindent from the original source before use.
# --- Setup sub-section: progress bar, current-file indicator, result placeholders ---
st.subheader("""Run tests""")
test_runner_progress_bar = st.progress(0)
test_runner_current_file_url = st.empty()
st.subheader("Results")
result_table_section = st.empty()
st.subheader("Exceptions log")
result_exception_section = st.empty()
log = ""
st.subheader("Screen output")
# Show the pending test items as a table up-front; the table placeholder is
# re-used so later updates replace it in place.
test_items_dataframe = services.test_items_dataframe.create_from_test_items(
test_items
)
result_table_section.table(test_items_dataframe)
test_items_count = len(test_items)
# Iterate over the items, presumably updating progress per item — TODO confirm:
for index, test_item in enumerate(test_items):
_progress_section(
# NOTE(review): the file is truncated here — the _progress_section(...) call is
# cut off mid-argument-list and the following lines belong to a different file.
track_id = track_id.split('?')[0].split('/track/')[1]
show_spectros = st.checkbox('Show Spectrograms', value = False)
# check if a track_id has been entered
if len(track_id) > 0:
# get track from Spotify API
track = get_spotify_track(track_id)
st.subheader('Track Summary')
st.table(get_track_summmary_df(track))
# check if there is track preview available from Spotify
if track['preview_url']:
# display 30 second track preview
st.subheader('Track Preview (What the Algorithm "Hears")')
st.write('')
preview = get_track_preview(track_id)
st.audio(preview)
# get top and bottom labels for the track
st.subheader('Track Analysis')
track_df = deepcopy(DF[DF.track_id == track_id].reset_index(drop = True))
# return values from db if already classified, otherwise classify
if len(track_df) > 0:
track_df = deepcopy(track_df.iloc[:,5:].T.rename(columns = {0: 'score'}).sort_values('score', ascending = False))
st.table(pd.DataFrame({'Top 5': track_df[:5].index.tolist(), 'Bottom 5': track_df[-5:].index.tolist()}))
if show_spectros: generate_spectros(preview)
else:
generate_spectros(preview)
track_df = get_predictions()
def show_random_params(data: dict, interface_type: str = "Professional"):
    """Shows random params used for transformation (from A.ReplayCompose)"""
    # Only the "Professional" interface exposes the raw replay parameters.
    if interface_type != "Professional":
        return
    st.subheader("Random params used")
    # Map each applied transform's short class name to the params it sampled.
    random_values = {
        transform["__class_fullname__"].split(".")[-1]: transform["params"]
        for transform in data["replay"]["transforms"]
    }
    st.write(random_values)
}
}, {
"transform": [{"filter": {"selection": "CylYr"}}],
"mark": "circle",
"encoding": {
"x": {"field": "Horsepower", "type": "quantitative"},
"y": {"field": "Miles_per_Gallon", "type": "quantitative"},
"color": {"field": "Origin", "type": "nominal"},
"size": {"value": 100}
}
}]
})
# -
st.subheader('Geo chart (WIP)')
DATA_URL = 'https://s3-us-west-2.amazonaws.com/streamlit-demo-data/uber-raw-data-sep14.csv.gz'
df = pd.read_csv(DATA_URL, nrows=1000)
df['hour'] = df['Date/Time'].apply(lambda x: parse(x).hour)
df = df.rename(str.lower, axis='columns')
st.vega_lite_chart(df, {
"$schema": "https://vega.github.st/schema/vega-lite/v2.1.json",
"height": 500,
"projection": {
"type": "albersUsa"
},
"mark": "circle",
"encoding": {
"longitude": {
"field": "lon",
the dataframe you pass in at the start. So when there's no dataframe we
cannot detect that configuration.
"""
)
data = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1, 3, 2, 4]})
spec = {
"mark": "line",
"encoding": {
"x": {"field": "a", "type": "quantitative"},
"y": {"field": "b", "type": "quantitative"},
},
}
st.subheader("Here are 4 empty charts")
st.vega_lite_chart(spec)
st.line_chart()
st.area_chart()
st.bar_chart()
st.write("Below is an empty pyplot chart (i.e. just a blank image)")
st.pyplot()
st.write("...and that was it.")
st.subheader("Here are 5 filled charts")
x = st.vega_lite_chart(spec)
x.vega_lite_chart(data, spec)
x = st.vega_lite_chart(spec)
time.sleep(0.2) # Sleep a little so the add_rows gets sent separately.
x.add_rows(data)
visualizations:
optical_flow_on_image:
image_key: "images/0/image"
flow_key: "forward_flow"
vector_frequency: 5
flow_downsample_method: max_magnitude
Parameters
----------
ex : dict
Example dict from dataset
config : dict
Config dict
"""
st.subheader("Optical flow on image")
# get user input
image, image_key = st_get_list_or_dict_item(
ex,
"image",
filter_fn=isimage,
config=config,
config_key="edexplore/visualizations/optical_flow_on_image/image_key",
)
flow, flow_key = st_get_list_or_dict_item(
ex,
"flow",
filter_fn=isflow,
config=config,
config_key="edexplore/visualizations/optical_flow_on_image/flow_key",
)
st.write('')
st.write(top_n_correlated_features(data_df, target_col, 10))
st.write('')
st.write('**Top 10 most correlated features to the bias feature**')
for b in bias_cols:
st.write('')
st.write(top_n_correlated_features(data_df, b, 10))
st.write('')
st.write('**Correlation Heatmap**')
corr_df = data_df.corr()
heatmap(corr_df, 'correlation-heat-map')
################################################################################
st.write('')
st.subheader('2.4 Splitting data into training and test sets')
st.write('')
X_all = data_df[feature_cols]
y_all = data_df[target_col]
Z_all = data_df[bias_cols]
# Splitting the original dataset into training and testing parts
(X_train, X_train2, X_train1, X_test,
y_train, y_train2, y_train1, y_test,
Z_train, Z_test) = make_training_and_test_sets(X_all, y_all, Z_all, n_train)
X_train, X_train2, X_train1, X_test = normalise(X_train, X_train2, X_train1, X_test)
st.write('Training set: {} samples'.format(X_train.shape[0]))
st.write('Test set: {} samples'.format(X_test.shape[0]))
################################################################################
st.image(load_image("imgs/iris_versicolor.jpg"), width=400)
elif species_type == "Virginica":
st.text("Showing Virginica Species")
st.image(load_image("imgs/iris_virginica.jpg"), width=400)
# Show Image or Hide Image with Checkbox
if st.checkbox("Show Image/Hide Image"):
my_image = load_image("iris_setosa.jpg")
enh = ImageEnhance.Contrast(my_image)
num = st.slider("Set Your Contrast Number", 1.0, 3.0)
img_width = st.slider("Set Image Width", 300, 500)
st.image(enh.enhance(num), width=img_width)
# About
if st.button("About App"):
st.subheader("Iris Dataset EDA App")
st.text("Built with Streamlit")
st.text("Thanks to the Streamlit Team Amazing Work")
if st.checkbox("By"):
st.text("Jesse E.Agbe(JCharis)")
st.text("Jesus Saves@JCharisTech")
st.write(
"Charts are just as simple, but they require us to introduce some "
"special functions first."
)
st.write("So assuming `data_frame` has been defined as...")
with st.echo():
chart_data = pd.DataFrame(
np.random.randn(20, 5), columns=["pv", "uv", "a", "b", "c"]
)
st.write("...you can easily draw the charts below:")
st.subheader("Example of line chart")
with st.echo():
st.line_chart(chart_data)
st.write(
"As you can see, each column in the dataframe becomes a different "
"line. Also, values on the _x_ axis are the dataframe's indices. "
"Which means we can customize them this way:"
)
with st.echo():
chart_data2 = pd.DataFrame(
np.random.randn(20, 2),
columns=["stock 1", "stock 2"],
index=pd.date_range("1/2/2011", periods=20, freq="M"),
)
if sma2:
period2 = st.sidebar.slider(
"SMA2 period", min_value=5, max_value=500, value=100, step=1
)
data[f"SMA2 {period2}"] = data["Adj Close"].rolling(period2).mean()
data2[f"SMA2 {period2}"] = data[f"SMA2 {period2}"].reindex(data2.index)
st.subheader("Chart")
st.line_chart(data2)
if st.sidebar.checkbox("View stadistic"):
st.subheader("Stadistic")
st.table(data2.describe())
if st.sidebar.checkbox("View quotes"):
st.subheader(f"{asset} historical data")
st.write(data2)
st.sidebar.title("About")
st.sidebar.info(
"This app is a simple example of "
"using Strealit to create a financial data web app.\n"