Andrii Demydenko committed on
Commit
6e4b95f
1 Parent(s): 5b15289

add requirements, multi-page structure, basic functionality

Browse files
app.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from st_pages import Page, show_pages, add_page_title
3
+
4
+
5
+ def main():
6
+ st.session_state["shared"] = True
7
+ st.set_page_config(
8
+ page_title="NLP - Project",
9
+ page_icon="馃",
10
+ )
11
+ add_page_title()
12
+ show_pages(
13
+ [
14
+ Page("pages/introduction.py", "Intro", "馃槉"),
15
+ Page("pages/data_analysis.py", "Data and data", "馃搳"),
16
+ Page("pages/run_models.py", "Run models", "馃"),
17
+ ]
18
+ )
19
+
20
+ if __name__ == "__main__":
21
+ main()
pages/data_analysis.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

from matplotlib import pyplot as plt
from wordcloud import WordCloud, STOPWORDS
from datasets import load_dataset
import numpy as np

# NOTE(review): STOPWORDS and numpy are imported but not used on this page yet.


@st.cache_data
def load_tweet_text() -> str:
    """Download the tweet dataset and concatenate all tweet texts.

    Cached with st.cache_data so the (slow) dataset download and the
    full-corpus string join do not re-run on every Streamlit rerun.
    """
    ds = load_dataset("rajteer/Natural_disaster_tweets", revision="main", split='train')
    return ' '.join(row['tweet_text'] for row in ds)


text_data = load_tweet_text()

# Render a word cloud over the whole tweet corpus.
wordcloud = WordCloud(width=800, height=400).generate(text_data)

fig, ax = plt.subplots(figsize=(10, 5))
ax.imshow(wordcloud, interpolation='bilinear')
ax.axis('off')  # the image is the whole plot; hide the axes

st.pyplot(fig)


# DataSet links
st.subheader("DataSet links")
st.markdown("- [Humaid Dataset](https://crisisnlp.qcri.org/humaid_dataset?fbclid=IwAR2rpSdcVhcXvQagxAG5VA2dvwAUOJOCVwTKxqtDiz7soIhVMUtp_N0BfSo)")
pages/introduction.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

# Landing page of the multi-page app: title, contributors and project topic.
# Fixed mojibake in the Polish strings (UTF-8 text had been misdecoded,
# e.g. "j臋zyka" -> "języka", "Pawe艂" -> "Paweł").

st.title("NLP - Project")

st.write("**_Wprowadzenie do przetwarzania języka naturalnego_**")

st.subheader("Contributors:")
st.markdown("```\nAndrii Demydenko - 317084\nMieszko Niewiarowski - xxxxxx\nPaweł Rajter - xxxxxx```")

st.header("Topic of the project")
st.write("**Topic**: Wykrywanie wypadków (miejsce, rodzaj, czas) na podstawie wpisów w mediach społecznościowych")
pages/run_models.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from transformers import pipeline


@st.cache_resource
def get_pipeline():
    """Load the Hugging Face inference pipeline once per server process.

    Without caching, the model would be re-instantiated (and possibly
    re-downloaded) on every Streamlit rerun, i.e. on each interaction.
    """
    return pipeline(model="nlp-pw/test-model-2")


pipe = get_pipeline()

text = st.text_area("Enter your text here 馃槂")

# Run inference only once the user has entered some text.
if text:
    out = pipe(text)
    st.json(out)
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
datasets
matplotlib
streamlit
torch
transformers

wordcloud
st_pages