atanu0491 committed on
Commit
f189b6f
·
1 Parent(s): 57473fc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +109 -86
app.py CHANGED
# File-upload branch of the Streamlit app: the user uploads a spreadsheet,
# each non-null sentence in column 'Unnamed: 2' is tokenized and POS-tagged
# with the (externally loaded) `model`, and the tagged sentence is shown as
# "word/TAG" pairs.  Two words are highlighted in separate lines:
#   * `search_word_def` — derived from the uploaded file's name (the last
#     space-separated token before the extension), always searched; and
#   * `search_word`     — an optional extra word typed by the user
#     (only when the user answers 'Yes' to the processing question).
# NOTE(review): relies on `st`, `pd`, `choice`, `model`, `BasicTokenizer`
# and `Sentence` being defined earlier in the file (imports/model loading
# are outside this hunk).
if choice == 'ফাইল আপলোড':
    uploaded_files = st.file_uploader("Choose a CSV file")

    if uploaded_files is not None:
        # Default search word comes from the file name, e.g.
        # "my data আকাশ.xlsx" -> "আকাশ".
        search_word_def = uploaded_files.name.split('.')[0].split(' ')[-1]
        # Despite the "CSV" prompt above, the file is parsed as Excel —
        # presumably the expected uploads are .xlsx; verify with callers.
        dataframe = pd.read_excel(uploaded_files)

        choice_new = st.selectbox('আপনি কিভাবে এটি প্রক্রিয়া করতে চান?', ['Select', 'Yes', 'No'])

        if choice_new == 'Yes':
            search_word = st.text_input('Any other word you want to search for', '')
            if st.button('search'):
                # BUG FIX: st.text_input never returns None (its default here
                # is ''), so the original `search_word is not None` test was
                # always true and the error message below was unreachable.
                # A truthiness test correctly catches the empty input.
                if search_word:
                    for index, row in dataframe.iterrows():
                        if pd.notnull(row['Unnamed: 2']):
                            data = BasicTokenizer().tokenize(row['Unnamed: 2'])
                            sentence = Sentence(data)
                            model.predict(sentence)

                            search_w_d = []
                            search_w = []
                            my_list = []
                            for token in sentence:
                                # Same "text/tag" string the original built
                                # via three separate append-then-join blocks.
                                tagged = "/".join((token.text, token.tag))
                                if token.text == search_word:
                                    search_w.append(tagged)
                                if token.text == search_word_def:
                                    search_w_d.append(tagged)
                                my_list.append(tagged)
                            st.write(" ".join(my_list))
                            st.write(" ".join(search_w_d))
                            st.write(" ".join(search_w))
                else:
                    st.error('Please enter words', icon="🚨")

        if choice_new == 'No':
            # No extra word: tag every sentence and highlight only the
            # file-name-derived word.
            for index, row in dataframe.iterrows():
                if pd.notnull(row['Unnamed: 2']):
                    data = BasicTokenizer().tokenize(row['Unnamed: 2'])
                    sentence = Sentence(data)
                    model.predict(sentence)

                    search_w_d = []
                    my_list = []
                    for token in sentence:
                        tagged = "/".join((token.text, token.tag))
                        if token.text == search_word_def:
                            search_w_d.append(tagged)
                        my_list.append(tagged)
                    st.write(" ".join(my_list))
                    st.write(" ".join(search_w_d))