import streamlit as st
import awesome_streamlit as ast


def write():
    """Used to write the About page in the app.py file."""
    with st.spinner("Loading About ..."):
        ast.shared.components.title_awesome(" - About")
        st.markdown(
            """## Contributions

This is an open source project and you are very welcome to **contribute** your awesome
comments, questions, resources and apps as
[issues](https://github.com/MarcSkovMadsen/awesome-streamlit/issues) or
[pull requests](https://github.com/MarcSkovMadsen/awesome-streamlit/pulls)
to the [source code](https://github.com/MarcSkovMadsen/awesome-streamlit).

For more details see the [Contribute](https://github.com/marcskovmadsen/awesome-streamlit#contribute) section of the README file.

## The Developer

This project is developed by Marc Skov Madsen. You can learn more about me at
[datamodelsanalytics.com](https://datamodelsanalytics.com).

Feel free to reach out if you want to join the project as a developer. You can find my contact details at [datamodelsanalytics.com](https://datamodelsanalytics.com)."""
        )
            if model.hqic <= best_model_hqic:
                no_of_lower_metrics += 1
            if no_of_lower_metrics >= 2:
                best_model_aic = np.round(model.aic, 0)
                best_model_bic = np.round(model.bic, 0)
                best_model_hqic = np.round(model.hqic, 0)
                best_model_order = (p_, d, q_, P_, D, Q_, s)
                current_best_model = model
                resid = np.round(np.expm1(current_best_model.resid).mean(), 3)
                models.append(model)
                # st.markdown("------------------")
                # st.markdown("**Best model so far**: SARIMA {}".format(best_model_order))
                # st.markdown("**AIC**: {} **BIC**: {} **HQIC**: {} **Resid**: {}".format(best_model_aic, best_model_bic, best_model_hqic, resid))
        except Exception:
            pass
    st.success('Grid Search done!')
    st.markdown('')
    st.markdown('### Best model results')
    st.text(current_best_model.summary())
    # return current_best_model, models, best_model_order
    return best_model_order
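# The fragment above is the scoring step of a SARIMA grid search. Below is a
# minimal sketch of the surrounding loop, assuming statsmodels' SARIMAX and a
# univariate series `y`; the function name and range arguments are hypothetical:
import itertools

import numpy as np
import streamlit as st
from statsmodels.tsa.statespace.sarimax import SARIMAX


def sarima_grid_search(y, p_range, q_range, P_range, Q_range, d=1, D=1, s=12):
    best_model_aic = best_model_bic = best_model_hqic = np.inf
    best_model_order, current_best_model, models = None, None, []
    for p_, q_, P_, Q_ in itertools.product(p_range, q_range, P_range, Q_range):
        try:
            model = SARIMAX(
                y, order=(p_, d, q_), seasonal_order=(P_, D, Q_, s)
            ).fit(disp=False)
            # A candidate replaces the incumbent when at least two of the
            # three information criteria (AIC, BIC, HQIC) improve.
            no_of_lower_metrics = (
                int(model.aic <= best_model_aic)
                + int(model.bic <= best_model_bic)
                + int(model.hqic <= best_model_hqic)
            )
            if no_of_lower_metrics >= 2:
                best_model_aic = model.aic
                best_model_bic = model.bic
                best_model_hqic = model.hqic
                best_model_order = (p_, d, q_, P_, D, Q_, s)
                current_best_model = model
                models.append(model)
        except Exception:
            continue
    st.text(current_best_model.summary())
    return best_model_order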
"Caraballo, S.A. (1999) Automatic construction of a hypernym-labeled noun hierarchy. In Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics (ACL'99), College Park, Maryland, pp. 120-126.",
],
)
user_text = st.text_input(label="Enter a citation string", value=text_selected)
parse_button_clicked = st.button("Parse Citation")
elif model_selected == "I2B2 Clinical Notes Tagging":
    st.title("I2B2 Clinical Notes Tagging")
    st.markdown(
        "Clinical Natural Language Processing helps in identifying salient information from clinical notes. "
        "Here, we have trained a neural network model on the **i2b2: Informatics for Integrating Biology and the Bedside** dataset. "
        "This dataset has manual annotations for the identified problems and for the suggested treatments and tests."
    )
    st.markdown(
        "**MODEL**: We trained a Bi-LSTM model with a CRF layer on top. We also included ELMo embeddings in the first layer."
    )
    text_selected = st.selectbox(
        label="Select an Example Clinical Note",
        options=[
            "Chest x - ray showed no evidence of cardiomegaly .",
            "Prostrate cancer and Renal failure",
            "Continue with Risperdal as per new psychiatrist",
        ],
    )
    st.markdown("---")
    user_text = st.text_input(label="Enter Clinical Notes", value=text_selected)
    parse_button_clicked = st.button("Parse Clinical Notes")
    if parse_button_clicked:
        text_selected = user_text
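# The **MODEL** note above describes a Bi-LSTM tagger with a CRF layer. A
# minimal, hypothetical sketch of such a tagger in PyTorch, assuming the
# third-party `pytorch-crf` package; the original app's actual architecture
# and its ELMo integration are not shown in this snippet:
import torch.nn as nn
from torchcrf import CRF  # pip install pytorch-crf


class BiLSTMCRFTagger(nn.Module):
    def __init__(self, vocab_size, num_tags, embed_dim=128, hidden_dim=256):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(
            embed_dim, hidden_dim // 2, bidirectional=True, batch_first=True
        )
        self.hidden2tag = nn.Linear(hidden_dim, num_tags)
        self.crf = CRF(num_tags, batch_first=True)

    def loss(self, tokens, tags, mask):
        emissions = self.hidden2tag(self.lstm(self.embed(tokens))[0])
        # The CRF returns a log-likelihood; negate it to get a loss to minimise.
        return -self.crf(emissions, tags, mask=mask)

    def predict(self, tokens, mask):
        emissions = self.hidden2tag(self.lstm(self.embed(tokens))[0])
        # Viterbi decoding returns the best tag sequence per sentence.
        return self.crf.decode(emissions, mask=mask)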
import streamlit as st
import requests

st.title("Citation Intent Classification")
st.markdown(
    "Identifying the intent behind citing another scholarly document helps "
    "in fine-grained analysis of documents. Some citations refer to the "
    "methodology in another document, some refer to other works "
    "for background knowledge, and some compare and contrast their methods with another work. "
    "Citation Intent Classification models classify such intents."
)
st.markdown(
    "**MODEL DESCRIPTION:** This model is similar to [Arman Cohan et al](https://arxiv.org/pdf/1904.01608.pdf). We do not perform multi-task learning, but include "
    "ELMo embeddings in the model."
)
st.markdown("---")
st.write("**The label can be one of:**")
st.write(
    """<span style="display:inline-block; border: 1px solid #0077B6; border-radius: 5px; padding: 5px; background-color: #0077B6; color: white; margin: 5px;">
    RESULT
    </span>
    <span style="display:inline-block; border: 1px solid #0077B6; border-radius: 5px; padding: 5px; background-color: #0077B6; color: white; margin: 5px;">
    BACKGROUND
    </span>
    <span style="display:inline-block; border: 1px solid #0077B6; border-radius: 5px; padding: 5px; background-color: #0077B6; color: white; margin: 5px;">
    METHOD</span>""",
    unsafe_allow_html=True,
)
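# The `requests` import above suggests the app queries a model server for its
# predictions. A hypothetical sketch of that call; the endpoint URL and the
# JSON schema below are assumptions, not the app's actual API:
user_text = st.text_input(label="Enter a citation sentence")
if st.button("Classify Intent"):
    response = requests.post(
        "http://localhost:8000/predict",  # hypothetical model-server endpoint
        json={"text": user_text},
        timeout=10,
    )
    if response.ok:
        st.success(f"Predicted intent: {response.json().get('label')}")
    else:
        st.error(f"Prediction failed with status {response.status_code}")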
import numpy as np
import streamlit as st


def salary_predictor_component():
    """## Salary Predictor Component

    Users can input some of their developer features, such as years of
    experience, and get a prediction of their salary.
    """
    st.markdown("## App 2: Salary Predictor For Techies")
    experience = st.number_input("Years of Experience")
    test_score = st.number_input("Aptitude Test Score")
    interview_score = st.number_input("Interview Score")
    if st.button("Predict"):
        model = get_pickle(MODEL_PKL_FILE)
        features = [experience, test_score, interview_score]
        final_features = [np.array(features)]
        prediction = model.predict(final_features)
        st.balloons()
        st.success(f"Your salary per annum is: Ghc {prediction[0]:.0f}")
"Visualize the annotations using [displaCy](https://spacy.io/usage/visualizers) "
"and view stats about the datasets."
)
data_file = st.sidebar.selectbox("Dataset", FILES)
data = load_data(data_file)
n_no_ents = 0
n_total_ents = 0
st.header(f"{data_file} ({len(data)})")
for eg in data:
row = {"text": eg["text"], "ents": eg.get("spans", [])}
n_total_ents += len(row["ents"])
if not row["ents"]:
n_no_ents += 1
html = displacy.render(row, **SETTINGS).replace("\n\n", "\n")
st.markdown(HTML_WRAPPER.format(html), unsafe_allow_html=True)
st.sidebar.markdown(
f"""
| `{data_file}` | |
| --- | ---: |
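# `FILES`, `load_data`, `SETTINGS`, and `HTML_WRAPPER` are defined outside this
# snippet. A minimal sketch of plausible definitions, assuming the datasets are
# JSONL files with "text" and "spans" keys; all names here are assumptions:
import json

import streamlit as st
from spacy import displacy

FILES = ["dataset_a.jsonl", "dataset_b.jsonl"]  # hypothetical dataset files
SETTINGS = {"style": "ent", "manual": True}  # render pre-annotated spans
HTML_WRAPPER = (
    "<div style='overflow-x: auto; border: 1px solid #e6e9ef; "
    "border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem'>{}</div>"
)


def load_data(path):
    """Read one annotated example per line from a JSONL file."""
    with open(path, encoding="utf8") as f:
        return [json.loads(line) for line in f]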
def crawl(self, seed):
    self.frontier.enqueue(seed)
    while len(self.frontier):
        # Pull off next url
        url = self.frontier.dequeue()
        status = "Crawling: {}".format(url)
        print(status)
        st.markdown(status)
        url_data = crawl_url(url)
        if url_data and url_data['status'] == 200:
            canonical = url_data['meta']['canonical']
            doc_hash = self.urllookup.update_hashed(url, canonical=canonical)
            doc_data = {
                'url': url,
                'domain': url_data['domain'],
                'title': url_data['title'],
                'description': url_data['meta']['description'],
                'content': url_data['content'],
                'html': url_data['html'],
            }
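# `crawl_url` and the frontier queue are defined elsewhere. A minimal sketch of
# what `crawl_url` could look like, assuming requests and BeautifulSoup; the
# returned fields mirror the keys consumed above, but this is an assumption:
from urllib.parse import urlparse

import requests
from bs4 import BeautifulSoup


def crawl_url(url):
    """Fetch a url and extract the fields consumed by `crawl` above."""
    try:
        response = requests.get(url, timeout=10)
    except requests.RequestException:
        return None
    soup = BeautifulSoup(response.text, "html.parser")
    canonical_tag = soup.find("link", rel="canonical")
    description_tag = soup.find("meta", attrs={"name": "description"})
    return {
        "status": response.status_code,
        "domain": urlparse(url).netloc,
        "title": soup.title.string if soup.title else "",
        "meta": {
            "canonical": canonical_tag["href"] if canonical_tag else url,
            "description": description_tag["content"] if description_tag else "",
        },
        "content": soup.get_text(" ", strip=True),
        "html": response.text,
    }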
def bert_data(indexer):
    fn = 'bert.pkl'
    loaded = storage.load_pickle(fn)
    if loaded:
        st.markdown('Loaded saved BERT file.')
        return loaded
    indexer.build_bert_embeddings_st()
    storage.save_pickle(fn, indexer.bert)
    return indexer.bert
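# The `storage` module is not shown. A minimal sketch of pickle-backed helpers
# matching the calls above; the module layout is an assumption:
import os
import pickle


def load_pickle(fn):
    """Return the unpickled object at `fn`, or None if the file is missing."""
    if not os.path.exists(fn):
        return None
    with open(fn, "rb") as f:
        return pickle.load(f)


def save_pickle(fn, obj):
    """Pickle `obj` to `fn` so later runs can skip recomputing embeddings."""
    with open(fn, "wb") as f:
        pickle.dump(obj, f)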
def streamlit_dataframe(results):
    st.subheader("Streamlit Dataframe (st.dataframe)")
    number_of_rows, number_of_columns, style = select_number_of_rows_and_columns(
        results, key="st.dataframe"
    )
    filter_table = filter_results(results, number_of_rows, number_of_columns, style)
    st.dataframe(filter_table)
    st.markdown(
        """
Pros
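# `select_number_of_rows_and_columns` and `filter_results` are helpers defined
# elsewhere. A plausible sketch, assuming `results` is a pandas DataFrame; the
# signatures below are inferred from the call sites, not the original source:
import pandas as pd
import streamlit as st


def select_number_of_rows_and_columns(results: pd.DataFrame, key: str):
    rows = st.selectbox(
        "Number of rows", [10, 50, 100, len(results)], key=key + "_rows"
    )
    columns = st.slider(
        "Number of columns", 0, len(results.columns), 3, key=key + "_cols"
    )
    style = st.checkbox("Style the dataframe?", False, key=key + "_style")
    return rows, columns, style


def filter_results(results, number_of_rows, number_of_columns, style):
    filtered = results.iloc[:number_of_rows, :number_of_columns]
    if style:
        # Highlight the largest value in each numeric column.
        return filtered.style.highlight_max()
    return filtered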
import streamlit as st

st.markdown("This **markdown** is awesome! :sunglasses:")
st.markdown("This <b>HTML tag</b> is escaped!")
st.markdown("This <b>HTML tag</b> is not escaped!", unsafe_allow_html=True)
st.markdown("[text]")
st.markdown("[link](href)")
st.markdown("[][]")
st.markdown(r"Inline math with $\KaTeX$")
st.markdown(
    r"""
$$