atanu0491 committed on
Commit
0242d98
·
1 Parent(s): a146b1e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -61
app.py CHANGED
@@ -56,65 +56,24 @@ if choice == 'ফাইল আপলোড':
56
  if uploaded_files is not None:
57
  search_word_def = uploaded_files.name.split('.')[0].split(' ')[-1]
58
  dataframe = pd.read_excel(uploaded_files)
59
- choice_new = st.selectbox('Any other word you want to search for',['Select', 'Yes', 'No'])
60
- if choice_new == 'Yes':
61
- search_word = st.text_input('Enter the words (if multiple word write using comma separated)', '')
62
- # model = load_model('best-model-002.pt')
63
- if st.button('search'):
64
- if search_word is not None:
65
- word_list = search_word.split(',')
66
- # st.write(word_list)
67
- for index, row in dataframe.iterrows():
68
- if pd.notnull(row['Unnamed: 2']):
69
- data = BasicTokenizer().tokenize(row['Unnamed: 2'])
70
- sentence = Sentence(data)
71
- model.predict(sentence)
72
-
73
- search_w_d = []
74
- search_w = []
75
- my_list = []
76
- for token in sentence:
77
- for word_l in word_list:
78
- st.write(word_l)
79
- if token.text == word_l:
80
- st.write(token.text)
81
- w = []
82
- w.append(token.text)
83
- w.append(token.tag)
84
- search_w.append("/".join(tuple(w)))
85
- if token.text == search_word_def:
86
- w_d = []
87
- w_d.append(token.text)
88
- w_d.append(token.tag)
89
- search_w_d.append("/".join(tuple(w_d)))
90
- word = []
91
- word.append(token.text)
92
- word.append(token.tag)
93
- my_list.append("/".join(tuple(word)))
94
- st.write(" ".join(my_list))
95
- # st.write(" ".join(search_w_d))
96
- st.write(" ".join(search_w))
97
- else:
98
- st.error('Please enter words', icon="🚨")
99
- if choice_new == 'No':
100
- for index, row in dataframe.iterrows():
101
- if pd.notnull(row['Unnamed: 2']):
102
- data = BasicTokenizer().tokenize(row['Unnamed: 2'])
103
- sentence = Sentence(data)
104
- model.predict(sentence)
105
-
106
- search_w_d = []
107
-
108
- my_list = []
109
- for token in sentence:
110
- if token.text == search_word_def:
111
- w_d = []
112
- w_d.append(token.text)
113
- w_d.append(token.tag)
114
- search_w_d.append("/".join(tuple(w_d)))
115
- word = []
116
- word.append(token.text)
117
- word.append(token.tag)
118
- my_list.append("/".join(tuple(word)))
119
  st.write(" ".join(my_list))
120
- st.write(" ".join(search_w_d))
 
56
  if uploaded_files is not None:
57
  search_word_def = uploaded_files.name.split('.')[0].split(' ')[-1]
58
  dataframe = pd.read_excel(uploaded_files)
59
+ for index, row in dataframe.iterrows():
60
+ if pd.notnull(row['Unnamed: 2']):
61
+ data = BasicTokenizer().tokenize(row['Unnamed: 2'])
62
+ sentence = Sentence(data)
63
+ model.predict(sentence)
64
+
65
+ search_w_d = []
66
+ search_w = []
67
+ my_list = []
68
+ for token in sentence:
69
+ if token.text == search_word_def:
70
+ w_d = []
71
+ w_d.append(token.text)
72
+ w_d.append(token.tag)
73
+ search_w_d.append("/".join(tuple(w_d)))
74
+ word = []
75
+ word.append(token.text)
76
+ word.append(token.tag)
77
+ my_list.append("/".join(tuple(word)))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  st.write(" ".join(my_list))
79
+ st.write(" ".join(search_w_d))