Update app.py
Browse files
app.py
CHANGED
|
@@ -28,15 +28,24 @@ languages_list = [("Gujarati", "gu_IN"), ('Hindi',"hi_IN") , ("Bengali","bn_IN")
|
|
| 28 |
("Marathi","mr_IN"), ("Tamil","ta_IN"), ("Telugu","te_IN")]
|
| 29 |
|
| 30 |
|
|
|
|
|
|
|
| 31 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
-
def intitalize_lang(language):
|
| 34 |
-
# translation_tokenizer.src_lang = "en_xx"
|
| 35 |
-
# encoded_hi = translation_tokenizer(sentence, return_tensors="pt")
|
| 36 |
-
# generated_tokens = translation_model.generate(**encoded_hi, forced_bos_token_id=translation_tokenizer.lang_code_to_id[language] )
|
| 37 |
-
# x = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
| 38 |
-
print(language)
|
| 39 |
-
# return x
|
| 40 |
|
| 41 |
|
| 42 |
llm_model = "mistralai/Mistral-7B-Instruct-v0.2"
|
|
@@ -182,7 +191,7 @@ def conversation(qa_chain, message, history):
|
|
| 182 |
# Generate response using QA chain
|
| 183 |
response = qa_chain({"question": message, "chat_history": formatted_chat_history})
|
| 184 |
response_answer = response["answer"]
|
| 185 |
-
print(english_to_indian(response["answer"][:
|
| 186 |
if response_answer.find("Helpful Answer:") != -1:
|
| 187 |
response_answer = response_answer.split("Helpful Answer:")[-1]
|
| 188 |
response_sources = response["source_documents"]
|
|
@@ -212,18 +221,6 @@ def upload_file(file_obj):
|
|
| 212 |
return list_file_path
|
| 213 |
|
| 214 |
|
| 215 |
-
def english_to_indian(sentence):
|
| 216 |
-
translation_tokenizer.src_lang = "en_xx"
|
| 217 |
-
encoded_hi = translation_tokenizer(sentence, return_tensors="pt")
|
| 218 |
-
generated_tokens = translation_model.generate(**encoded_hi, forced_bos_token_id=translation_tokenizer.lang_code_to_id[lang_btn] )
|
| 219 |
-
return (translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True))
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
def indian_to_english(sentence):
|
| 223 |
-
translation_tokenizer.src_lang = lang_btn
|
| 224 |
-
encoded_hi = translation_tokenizer(sentence, return_tensors="pt")
|
| 225 |
-
generated_tokens = translation_model.generate(**encoded_hi, forced_bos_token_id=translation_tokenizer.lang_code_to_id["en_XX"] )
|
| 226 |
-
return (translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True))
|
| 227 |
|
| 228 |
|
| 229 |
def demo():
|
|
@@ -278,7 +275,7 @@ def demo():
|
|
| 278 |
with gr.Row():
|
| 279 |
lang_btn = gr.Dropdown(languages_list, label="Languages", value = languages_list[1],
|
| 280 |
type="value", info="Choose your language",interactive = True)
|
| 281 |
-
|
| 282 |
|
| 283 |
chatbot = gr.Chatbot(height=300)
|
| 284 |
|
|
|
|
| 28 |
("Marathi","mr_IN"), ("Tamil","ta_IN"), ("Telugu","te_IN")]
|
| 29 |
|
| 30 |
|
| 31 |
+
# Last language code chosen in the dropdown (e.g. "hi_IN").
# Defaults to Hindi, matching the dropdown's initial value languages_list[1].
_current_lang = "hi_IN"

def intitalize_lang(language=None):
    """Record and return the user's target-language code.

    Wired to the Gradio dropdown's ``select`` event, which passes the
    chosen value (e.g. ``"ta_IN"``). The translation helpers call this
    with NO argument to read the stored selection — the original
    signature ``intitalize_lang(language)`` made that call raise
    ``TypeError``, so ``language`` now defaults to ``None`` and the last
    selection is kept in a module-level variable.

    NOTE(review): name keeps the original "intitalize" spelling because
    it is referenced elsewhere in the file (``lang_btn.select(...)``).
    """
    global _current_lang
    if language is not None:
        _current_lang = language
    return _current_lang
|
| 33 |
|
| 34 |
+
def english_to_indian(sentence, language=None):
    """Translate an English *sentence* into the selected Indian language.

    Args:
        sentence: English text to translate.
        language: mBART-50 target code such as ``"hi_IN"``. When ``None``
            (the default, matching the original call sites), the language
            last stored by ``intitalize_lang`` is used, falling back to
            Hindi.

    Returns:
        list[str]: decoded translation(s) from ``batch_decode``.

    Fixes vs. original:
      * ``lang = intitalize_lang()`` called a one-required-arg function
        with no argument — an unconditional ``TypeError``.
      * ``src_lang = "en_xx"`` — mBART-50 language codes are
        case-sensitive; English is ``"en_XX"``.
    """
    if language is None:
        # Read the dropdown selection defensively so this function works
        # even if the selection store is absent; default to Hindi.
        language = globals().get("_current_lang", "hi_IN")
    translation_tokenizer.src_lang = "en_XX"  # source is English
    encoded = translation_tokenizer(sentence, return_tensors="pt")
    generated_tokens = translation_model.generate(
        **encoded,
        forced_bos_token_id=translation_tokenizer.lang_code_to_id[language],
    )
    return translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def indian_to_english(sentence, language=None):
    """Translate *sentence* from the selected Indian language to English.

    Args:
        sentence: text in the user's chosen Indian language.
        language: mBART-50 source code such as ``"hi_IN"``. When ``None``
            (the default, matching the original call sites), the language
            last stored by ``intitalize_lang`` is used, falling back to
            Hindi.

    Returns:
        list[str]: decoded English translation(s) from ``batch_decode``.

    Fix vs. original: ``lang = intitalize_lang()`` called a
    one-required-arg function with no argument — an unconditional
    ``TypeError``.
    """
    if language is None:
        # Read the dropdown selection defensively so this function works
        # even if the selection store is absent; default to Hindi.
        language = globals().get("_current_lang", "hi_IN")
    translation_tokenizer.src_lang = language  # source is the Indian language
    encoded = translation_tokenizer(sentence, return_tensors="pt")
    generated_tokens = translation_model.generate(
        **encoded,
        forced_bos_token_id=translation_tokenizer.lang_code_to_id["en_XX"],
    )
    return translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
| 48 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
|
| 50 |
|
| 51 |
llm_model = "mistralai/Mistral-7B-Instruct-v0.2"
|
|
|
|
| 191 |
# Generate response using QA chain
|
| 192 |
response = qa_chain({"question": message, "chat_history": formatted_chat_history})
|
| 193 |
response_answer = response["answer"]
|
| 194 |
+
print(english_to_indian(response["answer"][:500]))
|
| 195 |
if response_answer.find("Helpful Answer:") != -1:
|
| 196 |
response_answer = response_answer.split("Helpful Answer:")[-1]
|
| 197 |
response_sources = response["source_documents"]
|
|
|
|
| 221 |
return list_file_path
|
| 222 |
|
| 223 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 224 |
|
| 225 |
|
| 226 |
def demo():
|
|
|
|
| 275 |
with gr.Row():
|
| 276 |
lang_btn = gr.Dropdown(languages_list, label="Languages", value = languages_list[1],
|
| 277 |
type="value", info="Choose your language",interactive = True)
|
| 278 |
+
lang_btn.select(intitalize_lang, inputs = lang_btn)
|
| 279 |
|
| 280 |
chatbot = gr.Chatbot(height=300)
|
| 281 |
|