st.image(channels, caption=["Red", "Green", "Blue"], width=200)
st.header("Visualizing data as images via OpenCV")
st.write("Streamlit also supports OpenCV!")
try:
    import cv2

    if image_bytes is not None:
        with st.echo():
            # np.frombuffer replaces the deprecated np.fromstring for raw bytes
            image = cv2.cvtColor(
                cv2.imdecode(np.frombuffer(image_bytes, dtype="uint8"), 1),
                cv2.COLOR_BGR2RGB,
            )
            st.image(image, caption="Sunset", use_column_width=True)
            st.image(cv2.split(image), caption=["Red", "Green", "Blue"], width=200)
except ImportError as e:
    st.write(
        "If you install OpenCV with the command `pip install opencv-python-headless`, "
        "this section will show you how to use it."
    )
    st.warning("Error running opencv: " + str(e))
st.header("Inserting headers")
st.write(
    "To insert titles and headers like the ones on this page, use the `title`, "
    "`header`, and `subheader` functions."
)
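A minimal illustration of those three calls (not part of the snippet above, just the standard Streamlit API):

# Illustration only: Streamlit's three heading levels
st.title("My Streamlit app")        # page title, largest
st.header("A major section")        # section heading
st.subheader("A smaller subsection")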
st.header("Visualizing data as images via OpenCV")
st.write("Streamlit also supports OpenCV!")
try:
import cv2
if image_bytes is not None:
with st.echo():
image = cv2.cvtColor(
cv2.imdecode(np.fromstring(image_bytes, dtype="uint8"), 1),
cv2.COLOR_BGR2RGB,
)
st.image(image, caption="Sunset", use_column_width=True)
st.image(cv2.split(image), caption=["Red", "Green", "Blue"], width=200)
except ImportError as e:
st.write(
"If you install opencv with the command `pip install opencv-python-headless` "
"this section will tell you how to use it."
)
st.warning("Error running opencv: " + str(e))
st.header("Inserting headers")
st.write(
"To insert titles and headers like the ones on this page, use the `title`, "
"`header`, and `subheader` functions."
)
st.header("Preformatted text")
MQTT_BROKER = CONFIG["mqtt"]["broker"]
MQTT_PORT = CONFIG["mqtt"]["port"]
MQTT_QOS = CONFIG["mqtt"]["QOS"]
MQTT_TOPIC = CONFIG["save-captures"]["mqtt_topic"]
VIEWER_WIDTH = 600
def get_random_numpy():
    """Return a dummy frame."""
    return np.random.randint(0, 100, size=(32, 32))

title = st.title(MQTT_TOPIC)
viewer = st.image(get_random_numpy(), width=VIEWER_WIDTH)

# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    st.write(f"Connected with result code {rc} to MQTT broker on {MQTT_BROKER}")

# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    if msg.topic != MQTT_TOPIC:
        return
    image = byte_array_to_pil_image(msg.payload)
    image = image.convert("RGB")
    viewer.image(image, width=VIEWER_WIDTH)
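The excerpt defines the callbacks but never attaches them to a client. A minimal sketch of how they might be wired up with paho-mqtt, assuming the config values and the `byte_array_to_pil_image` helper above exist:

# Sketch only: connect the callbacks above to a paho-mqtt client
import paho.mqtt.client as mqtt

client = mqtt.Client()          # paho-mqtt 1.x style, matching the callback signatures above
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_BROKER, MQTT_PORT)
client.subscribe(MQTT_TOPIC, qos=MQTT_QOS)
client.loop_forever()           # blocks; each received frame updates `viewer` via on_message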
As you can see below, once we've used the model to generate image features, we can **store them to disk**
and re-use them without having to run inference again. This is one of the reasons embeddings are so popular
in practical applications: they allow for huge efficiency gains.
""")
with st.echo():
    model = vector_search.load_headless_pretrained_model()
    if generate_image_features:
        images_features, file_index = vector_search.generate_features(image_paths, model)
        vector_search.save_features(features_path, images_features, file_mapping_path, file_index)
    else:
        images_features, file_index = vector_search.load_features(features_path, file_mapping_path)
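The `save_features`/`load_features` helpers themselves are not shown in this excerpt. A plausible minimal version (file formats are an assumption) simply persists the array with NumPy and the index as JSON:

# Sketch only: one way the save/load helpers used above could be implemented
import json
import numpy as np

def save_features(features_path, features, mapping_path, file_index):
    np.save(features_path, features)      # assumes a ".npy" path
    with open(mapping_path, "w") as f:
        json.dump(file_index, f)          # note: integer keys become strings in JSON

def load_features(features_path, mapping_path):
    features = np.load(features_path)
    with open(mapping_path) as f:
        file_index = json.load(f)
    return features, file_index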
st.write("Our model is simply VGG16 without the last layer (softmax)")
st.image(Image.open('assets/vgg16_architecture.jpg'), width=800, caption="Original VGG. Credit to Data Wow Blog")
st.image(Image.open('assets/vgg16_chopped.jpg'), width=800, caption="Our model")
st.write("This is how we get such a model in practice")
show_source(vector_search.load_headless_pretrained_model)
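The echoed source is not reproduced in this excerpt; with Keras, chopping the softmax layer off VGG16 might look roughly like this (a sketch, not the app's exact code):

# Sketch only: a "headless" VGG16 that outputs the 4096-dim fc2 activations
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Model

def load_headless_pretrained_model():
    pretrained = VGG16(weights="imagenet", include_top=True)
    # keep everything up to the penultimate fully-connected layer, drop the softmax
    return Model(inputs=pretrained.input, outputs=pretrained.get_layer("fc2").output)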
st.write("""
What do we mean by generating embeddings? Well we just use our pre-trained model up to the penultimate layer, and
store the value of the activations.""")
show_source(vector_search.generate_features)
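Again, the echoed source is not included here. Generating the embeddings essentially means preprocessing each image and running a forward pass through the headless model, roughly:

# Sketch only: what a generate_features helper boils down to
from tensorflow.keras.preprocessing import image as keras_image
from tensorflow.keras.applications.vgg16 import preprocess_input
import numpy as np

def generate_features(image_paths, model):
    arrays = [keras_image.img_to_array(keras_image.load_img(p, target_size=(224, 224)))
              for p in image_paths]
    features = model.predict(preprocess_input(np.stack(arrays)))  # shape: (n_images, 4096)
    file_index = dict(enumerate(image_paths))                     # row -> file path
    return features, file_index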
st.write('Here is what the embeddings look like for the first 20 images. Each image is now represented by a '
         'sparse vector of size 4096:')
st.write(images_features[:20])
st.write("Now that we have the features, we will build a fast index to search through them using Annoy.")
with st.echo():
    image_index = vector_search.index_features(images_features)
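`index_features` is not shown here either; with Annoy, building such an index takes only a few lines (the tree count below is an assumption):

# Sketch only: an approximate-nearest-neighbour index over the embeddings
from annoy import AnnoyIndex

def index_features(features, n_trees=1000):
    index = AnnoyIndex(features.shape[1], "angular")  # 4096 dimensions, cosine-like metric
    for i, vec in enumerate(features):
        index.add_item(i, vec)
    index.build(n_trees)
    return index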
import requests
from io import BytesIO

from PIL import Image
import streamlit as st

@st.cache
def read_file_from_url(url):
    return requests.get(url).content

file_bytes = read_file_from_url(
    "https://streamlit.io/media/photo-1548407260-da850faa41e3.jpeg"
)
image = Image.open(BytesIO(file_bytes))
st.image(image, caption="Sunrise by the mountains", use_column_width=True)
st.write(
    """
    #### Image credit:
    """
)

# show an error title if the chosen transform parameters were invalid
if error != 0:
    st.title(
        "The error has occurred. Most probably you have passed wrong set of parameters. "
        "Check transforms that change the shape of image."
    )
# proceed only if everything is ok
if error == 0:
    augmented_image = data["image"]
    # show title
    st.title("Demo of Albumentations")

    # show the images
    width_transformed = int(
        width_original / image.shape[1] * augmented_image.shape[1]
    )
    st.image(image, caption="Original image", width=width_original)
    st.image(
        augmented_image,
        caption="Transformed image",
        width=width_transformed,
    )

    # comment about refreshing
    st.write("*Press 'R' to refresh*")

    # random values used to get transformations
    show_random_params(data, interface_type)

    # print additional info
    for transform in transforms:
        show_docstring(transform)
        st.code(str(transform))
elif species_type == 'Versicolor':
    st.text("Showing Versicolor Species")
    st.image(load_image('imgs/iris_versicolor.jpg'))
elif species_type == 'Virginica':
    st.text("Showing Virginica Species")
    st.image(load_image('imgs/iris_virginica.jpg'))

# Show Image or Hide Image with Checkbox
if st.checkbox("Show Image/Hide Image"):
    my_image = load_image('iris_setosa.jpg')
    enh = ImageEnhance.Contrast(my_image)
    num = st.slider("Set Your Contrast Number", 1.0, 3.0)
    img_width = st.slider("Set Image Width", 300, 500)
    st.image(enh.enhance(num), width=img_width)

# About
if st.button("About App"):
    st.subheader("Iris Dataset EDA App")
    st.text("Built with Streamlit")
    st.text("Thanks to the Streamlit Team Amazing Work")

if st.checkbox("By"):
    st.text("Jesse E.Agbe(JCharis)")
    st.text("Jesus Saves@JCharisTech")
    track_df = deepcopy(DF[DF.track_id == track_id].reset_index(drop=True))

    # return values from db if already classified, otherwise classify
    if len(track_df) > 0:
        track_df = deepcopy(track_df.iloc[:, 5:].T.rename(columns={0: 'score'}).sort_values('score', ascending=False))
        st.table(pd.DataFrame({'Top 5': track_df[:5].index.tolist(), 'Bottom 5': track_df[-5:].index.tolist()}))
        if show_spectros:
            generate_spectros(preview)
    else:
        generate_spectros(preview)
        track_df = get_predictions()
        st.table(pd.DataFrame({'Top 5': track_df[:5].index.tolist(), 'Bottom 5': track_df[-5:].index.tolist()}))

    if show_spectros:
        st.subheader('Spectrograms (What the Algorithm "Sees")')
        generate_grid()
        st.image(image=path('tmp/png/grid.png'), use_column_width=True)

# Spotify doesn't have preview for track
else:
    st.write('Preview unavailable for this track :(')
st.subheader("Generating semantic tags")
st.write("We can now easily extract tags from any image")
st.write("Let's try with our cat/bottle image")
st.image(to_array(image_paths[200]))
st.write('Generating tags for `%s`' % file_mapping[200])
with st.echo():
    results = vector_search.search_index_by_value(hybrid_images_features[200], word_index, word_mapping)
show_source(vector_search.search_index_by_value)
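The echoed source of `search_index_by_value` isn't reproduced here. Conceptually it queries an Annoy index with a raw vector and maps the returned item ids back to labels, roughly:

# Sketch only: query an Annoy index by vector and map ids back to words
def search_index_by_value(vector, search_index, item_mapping, top_n=10):
    ids = search_index.get_nns_by_vector(vector, top_n)
    return [item_mapping[i] for i in ids]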
st.write('\n'.join('- `%s`' % elt for elt in results))
st.write("These results are reasonable, let's try to see if we can detect more than the bottle in the "
"messy image below.")
st.image(to_array(image_paths[223]))
st.write('Generating tags for `%s`' % file_mapping[223])
with st.echo():
    results = vector_search.search_index_by_value(hybrid_images_features[223], word_index, word_mapping)
st.write('\n'.join('- `%s`' % elt for elt in results))
st.write("The model learns to extract **many relevant tags**, even from categories that it was not trained on!")
st.subheader("Searching for images using text")
st.write("""
Most importantly, we can use our joint embedding to search through our image database using any word.
We simply need to get our pre-trained word embedding from GloVe, and find the images that have the most similar
embeddings! Generalized image search with minimal data.
Let's start first with a word that was actually in our training set
""")