Update app.py
app.py CHANGED
@@ -1,274 +1,4 @@
-import gradio as gr
-from phi.agent import Agent
-from phi.model.groq import Groq
-import os
-import logging
-from sentence_transformers import CrossEncoder
-from backend.semantic_search import table, retriever
-import numpy as np
-from time import perf_counter
-import requests
-
-# Set up logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# API Key setup
-api_key = os.getenv("GROQ_API_KEY")
-if not api_key:
-    gr.Warning("GROQ_API_KEY not found. Set it in 'Repository secrets'.")
-    logger.error("GROQ_API_KEY not found.")
-    api_key = ""  # Fallback to empty string, but this will fail without a key
-else:
-    os.environ["GROQ_API_KEY"] = api_key
-
-# Bhashini API setup
-bhashini_api_key = os.getenv("API_KEY")
-bhashini_user_id = os.getenv("USER_ID")
-
-def bhashini_translate(text: str, from_code: str = "en", to_code: str = "hi") -> dict:
-    """Translates text from source language to target language using the Bhashini API."""
-    if not text.strip():
-        print('Input text is empty. Please provide valid text for translation.')
-        return {"status_code": 400, "message": "Input text is empty", "translated_content": None}
-    else:
-        print('Input text - ', text)
-        print(f'Starting translation process from {from_code} to {to_code}...')
-        gr.Warning(f'Translating to {to_code}...')
-
-    url = 'https://meity-auth.ulcacontrib.org/ulca/apis/v0/model/getModelsPipeline'
-    headers = {
-        "Content-Type": "application/json",
-        "userID": bhashini_user_id,
-        "ulcaApiKey": bhashini_api_key
-    }
-    payload = {
-        "pipelineTasks": [{"taskType": "translation", "config": {"language": {"sourceLanguage": from_code, "targetLanguage": to_code}}}],
-        "pipelineRequestConfig": {"pipelineId": "64392f96daac500b55c543cd"}
-    }
-
-    print('Sending initial request to get the pipeline...')
-    response = requests.post(url, json=payload, headers=headers)
-
-    if response.status_code != 200:
-        print(f'Error in initial request: {response.status_code}, Response: {response.text}')
-        return {"status_code": response.status_code, "message": "Error in translation request", "translated_content": None}
-
-    print('Initial request successful, processing response...')
-    response_data = response.json()
-    print('Full response data:', response_data)  # Debug the full response
-    if "pipelineInferenceAPIEndPoint" not in response_data or "callbackUrl" not in response_data["pipelineInferenceAPIEndPoint"]:
-        print('Unexpected response structure:', response_data)
-        return {"status_code": 400, "message": "Unexpected API response structure", "translated_content": None}
-
-    service_id = response_data["pipelineResponseConfig"][0]["config"][0]["serviceId"]
-    callback_url = response_data["pipelineInferenceAPIEndPoint"]["callbackUrl"]
-
-    print(f'Service ID: {service_id}, Callback URL: {callback_url}')
-
-    headers2 = {
-        "Content-Type": "application/json",
-        response_data["pipelineInferenceAPIEndPoint"]["inferenceApiKey"]["name"]: response_data["pipelineInferenceAPIEndPoint"]["inferenceApiKey"]["value"]
-    }
-    compute_payload = {
-        "pipelineTasks": [{"taskType": "translation", "config": {"language": {"sourceLanguage": from_code, "targetLanguage": to_code}, "serviceId": service_id}}],
-        "inputData": {"input": [{"source": text}], "audio": [{"audioContent": None}]}
-    }
-
-    print(f'Sending translation request with text: "{text}"')
-    compute_response = requests.post(callback_url, json=compute_payload, headers=headers2)
-
-    if compute_response.status_code != 200:
-        print(f'Error in translation request: {compute_response.status_code}, Response: {compute_response.text}')
-        return {"status_code": compute_response.status_code, "message": "Error in translation", "translated_content": None}
-
-    print('Translation request successful, processing translation...')
-    compute_response_data = compute_response.json()
-    translated_content = compute_response_data["pipelineResponse"][0]["output"][0]["target"]
-
-    print(f'Translation successful. Translated content: "{translated_content}"')
-    return {"status_code": 200, "message": "Translation successful", "translated_content": translated_content}
-
-# Initialize PhiData Agent
-agent = Agent(
-    name="Science Education Assistant",
-    role="You are a helpful science tutor for 10th-grade students",
-    instructions=[
-        "You are an expert science teacher specializing in 10th-grade curriculum.",
-        "Provide clear, accurate, and age-appropriate explanations.",
-        "Use simple language and examples that students can understand.",
-        "Focus on concepts from physics, chemistry, and biology.",
-        "Structure responses with headings and bullet points when helpful.",
-        "Encourage learning and curiosity."
-    ],
-    model=Groq(id="llama3-70b-8192", api_key=api_key),
-    markdown=True
-)
-
-# Response Generation Function
-def retrieve_and_generate_response(query, cross_encoder_choice, history=None):
-    """Generate response using semantic search and LLM"""
-    top_rerank = 25
-    top_k_rank = 20
-
-    if not query.strip():
-        return "Please provide a valid question."
-
-    try:
-        start_time = perf_counter()
-
-        # Encode query and search documents
-        query_vec = retriever.encode(query)
-        documents = table.search(query_vec, vector_column_name="vector").limit(top_rerank).to_list()
-        documents = [doc["text"] for doc in documents]
-
-        # Re-rank documents using cross-encoder
-        cross_encoder_model = CrossEncoder('BAAI/bge-reranker-base') if cross_encoder_choice == '(ACCURATE) BGE reranker' else CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
-        query_doc_pair = [[query, doc] for doc in documents]
-        cross_scores = cross_encoder_model.predict(query_doc_pair)
-        sim_scores_argsort = list(reversed(np.argsort(cross_scores)))
-        documents = [documents[idx] for idx in sim_scores_argsort[:top_k_rank]]
-
-        # Create context from top documents
-        context = "\n\n".join(documents[:10]) if documents else ""
-        context = f"Context information from educational materials:\n{context}\n\n"
-
-        # Add conversation history for context
-        history_context = ""
-        if history and len(history) > 0:
-            for user_msg, bot_msg in history[-2:]:  # Last 2 exchanges
-                if user_msg and bot_msg:
-                    history_context += f"Previous Q: {user_msg}\nPrevious A: {bot_msg}\n"
-
-        # Create full prompt
-        full_prompt = f"{history_context}{context}Question: {query}\n\nPlease answer the question using the context provided above. If the context doesn't contain relevant information, use your general knowledge about 10th-grade science topics."
-
-        # Generate response
-        response = agent.run(full_prompt)
-        response_text = response.content if hasattr(response, 'content') else str(response)
-
-        logger.info(f"Response generation took {perf_counter() - start_time:.2f} seconds")
-        return response_text
-
-    except Exception as e:
-        logger.error(f"Error in response generation: {e}")
-        return f"Error generating response: {str(e)}"
-
-def simple_chat_function(message, history, cross_encoder_choice):
-    """Chat function with semantic search and retriever integration"""
-    if not message.strip():
-        return "", history
-
-    # Generate response using the semantic search function
-    response = retrieve_and_generate_response(message, cross_encoder_choice, history)
-
-    # Add to history
-    history.append([message, response])
-
-    return "", history
-
-def translate_text(selected_language, history):
-    """Translate the last response in history to the selected language."""
-    iso_language_codes = {
-        "Hindi": "hi", "Gom": "gom", "Kannada": "kn", "Dogri": "doi", "Bodo": "brx", "Urdu": "ur",
-        "Tamil": "ta", "Kashmiri": "ks", "Assamese": "as", "Bengali": "bn", "Marathi": "mr",
-        "Sindhi": "sd", "Maithili": "mai", "Punjabi": "pa", "Malayalam": "ml", "Manipuri": "mni",
-        "Telugu": "te", "Sanskrit": "sa", "Nepali": "ne", "Santali": "sat", "Gujarati": "gu", "Odia": "or"
-    }
-
-    to_code = iso_language_codes[selected_language]
-    response_text = history[-1][1] if history and history[-1][1] else ''
-    print('response_text for translation', response_text)
-    translation = bhashini_translate(response_text, to_code=to_code)
-    return translation.get('translated_content', 'Translation failed.')
-
-# Gradio Interface with layout template
-with gr.Blocks(title="Science Chatbot", theme='gradio/soft') as demo:
-    # Header section
-    with gr.Row():
-        with gr.Column(scale=10):
-            gr.HTML(value="""<div style="color: #FF4500;"><h1>Welcome! I am your friend!</h1>Ask me !I will help you<h1><span style="color: #008000">I AM A CHATBOT FOR 10TH SCIENCE WITH TRANSLATION IN 22 LANGUAGES</span></h1></div>""")
-            gr.HTML(value=f"""<p style="font-family: sans-serif; font-size: 16px;">A free chat bot developed by K.M.RAMYASRI,TGT,GHS.SUTHUKENY using Open source LLMs for 10 std students</p>""")
-            gr.HTML(value=f"""<p style="font-family: Arial, sans-serif; font-size: 14px;"> Suggestions may be sent to <a href="mailto:ramyasriraman2019@gmail.com" style="color: #00008B; font-style: italic;">ramyadevi1607@yahoo.com</a>.</p>""")
-        with gr.Column(scale=3):
-            try:
-                gr.Image(value='logo.png', height=200, width=200)
-            except:
-                gr.HTML("<div style='height: 200px; width: 200px; background-color: #f0f0f0; display: flex; align-items: center; justify-content: center;'>Logo</div>")
-
-    # Chat and input components
-    chatbot = gr.Chatbot(
-        [],
-        elem_id="chatbot",
-        avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg',
-                       'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'),
-        bubble_full_width=False,
-        show_copy_button=True,
-        show_share_button=True,
-    )
-
-    with gr.Row():
-        msg = gr.Textbox(
-            scale=3,
-            show_label=False,
-            placeholder="Enter text and press enter",
-            container=False,
-        )
-        submit_btn = gr.Button(value="Submit text", scale=1, variant="primary")
-
-    # Additional controls
-    cross_encoder = gr.Radio(
-        choices=['(FAST) MiniLM-L6v2', '(ACCURATE) BGE reranker'],
-        value='(ACCURATE) BGE reranker',
-        label="Embeddings Model",
-        info="Select the model for document ranking"
-    )
-    language_dropdown = gr.Dropdown(
-        choices=[
-            "Hindi", "Gom", "Kannada", "Dogri", "Bodo", "Urdu", "Tamil", "Kashmiri", "Assamese", "Bengali", "Marathi",
-            "Sindhi", "Maithili", "Punjabi", "Malayalam", "Manipuri", "Telugu", "Sanskrit", "Nepali", "Santali",
-            "Gujarati", "Odia"
-        ],
-        value="Hindi",
-        label="Select Language for Translation"
-    )
-    translated_textbox = gr.Textbox(label="Translated Response")
-
-    # Event handlers
-    def update_chat_and_translate(message, history, cross_encoder_choice, selected_language):
-        if not message.strip():
-            return "", history, ""
-
-        # Generate response
-        response = retrieve_and_generate_response(message, cross_encoder_choice, history)
-        history.append([message, response])
-
-        # Translate response
-        translated_text = translate_text(selected_language, history)
-
-        return "", history, translated_text
-
-    msg.submit(update_chat_and_translate, [msg, chatbot, cross_encoder, language_dropdown], [msg, chatbot, translated_textbox])
-    submit_btn.click(update_chat_and_translate, [msg, chatbot, cross_encoder, language_dropdown], [msg, chatbot, translated_textbox])
-
-    clear = gr.Button("Clear Conversation")
-    clear.click(lambda: ([], "", ""), outputs=[chatbot, msg, translated_textbox])
-
-    # Example questions
-    gr.Examples(
-        examples=[
-            'What is the difference between metals and non-metals?',
-            'What is an ionic bond?',
-            'Explain asexual reproduction',
-            'What is photosynthesis?',
-            'Explain Newton\'s laws of motion'
-        ],
-        inputs=msg,
-        label="Try these example questions:"
-    )
-
-if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0", server_port=7860)# import gradio as gr
+# import gradio as gr
 # from phi.agent import Agent
 # from phi.model.groq import Groq
 # import os
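Note on the Bhashini code deleted above: bhashini_translate implements ULCA's two-step flow, one POST to getModelsPipeline to resolve a serviceId and an inference callback URL, then a second POST to that callback carrying the per-request inference key. Below is a minimal standalone sketch of the same flow, assuming only what the deleted code shows (endpoint URL, pipelineId, header names, and the API_KEY/USER_ID environment variables); the timeouts, helper name, and None-on-failure behavior are illustrative additions.

import os
import requests

PIPELINE_URL = "https://meity-auth.ulcacontrib.org/ulca/apis/v0/model/getModelsPipeline"
PIPELINE_ID = "64392f96daac500b55c543cd"  # pipeline id used by the deleted code

def bhashini_translate_once(text, from_code="en", to_code="hi"):
    """Two-step Bhashini translation sketch; returns the translated string or None."""
    auth = {
        "Content-Type": "application/json",
        "userID": os.environ["USER_ID"],     # same env vars the app reads
        "ulcaApiKey": os.environ["API_KEY"],
    }
    lang = {"sourceLanguage": from_code, "targetLanguage": to_code}
    # Step 1: resolve the pipeline to a serviceId and an inference endpoint.
    r = requests.post(PIPELINE_URL, headers=auth, timeout=30, json={
        "pipelineTasks": [{"taskType": "translation", "config": {"language": lang}}],
        "pipelineRequestConfig": {"pipelineId": PIPELINE_ID},
    })
    if r.status_code != 200:
        return None
    data = r.json()
    endpoint = data["pipelineInferenceAPIEndPoint"]
    service_id = data["pipelineResponseConfig"][0]["config"][0]["serviceId"]
    # Step 2: POST the text to the callback URL with the per-request key.
    infer = {
        "Content-Type": "application/json",
        endpoint["inferenceApiKey"]["name"]: endpoint["inferenceApiKey"]["value"],
    }
    r2 = requests.post(endpoint["callbackUrl"], headers=infer, timeout=30, json={
        "pipelineTasks": [{"taskType": "translation",
                           "config": {"language": lang, "serviceId": service_id}}],
        "inputData": {"input": [{"source": text}]},
    })
    if r2.status_code != 200:
        return None
    return r2.json()["pipelineResponse"][0]["output"][0]["target"]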
@@ -278,6 +8,7 @@ if __name__ == "__main__":
 # import numpy as np
 # from time import perf_counter
 # import requests
+# from jinja2 import Environment, FileSystemLoader
 
 # # Set up logging
 # logging.basicConfig(level=logging.INFO)
@@ -321,11 +52,16 @@ if __name__ == "__main__":
 #     response = requests.post(url, json=payload, headers=headers)
 
 #     if response.status_code != 200:
-#         print(f'Error in initial request: {response.status_code}')
+#         print(f'Error in initial request: {response.status_code}, Response: {response.text}')
 #         return {"status_code": response.status_code, "message": "Error in translation request", "translated_content": None}
 
 #     print('Initial request successful, processing response...')
 #     response_data = response.json()
+#     print('Full response data:', response_data)  # Debug the full response
+#     if "pipelineInferenceAPIEndPoint" not in response_data or "callbackUrl" not in response_data["pipelineInferenceAPIEndPoint"]:
+#         print('Unexpected response structure:', response_data)
+#         return {"status_code": 400, "message": "Unexpected API response structure", "translated_content": None}
+
 #     service_id = response_data["pipelineResponseConfig"][0]["config"][0]["serviceId"]
 #     callback_url = response_data["pipelineInferenceAPIEndPoint"]["callbackUrl"]
 
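The guard added in this hunk checks only for pipelineInferenceAPIEndPoint and callbackUrl; the lines just below it still index into pipelineResponseConfig and inferenceApiKey unchecked, so a malformed response can still raise KeyError or IndexError. A sketch of a tighter extraction covering every field the code touches (field names from the diff; the helper name is hypothetical):

def extract_pipeline_config(response_data):
    """Pull every field the code indexes into, or return None if any is missing."""
    try:
        endpoint = response_data["pipelineInferenceAPIEndPoint"]
        key = endpoint["inferenceApiKey"]
        return {
            "service_id": response_data["pipelineResponseConfig"][0]["config"][0]["serviceId"],
            "callback_url": endpoint["callbackUrl"],
            "key_name": key["name"],
            "key_value": key["value"],
        }
    except (KeyError, IndexError, TypeError):
        return None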
@@ -344,7 +80,7 @@ if __name__ == "__main__":
 #     compute_response = requests.post(callback_url, json=compute_payload, headers=headers2)
 
 #     if compute_response.status_code != 200:
-#         print(f'Error in translation request: {compute_response.status_code}')
+#         print(f'Error in translation request: {compute_response.status_code}, Response: {compute_response.text}')
 #         return {"status_code": compute_response.status_code, "message": "Error in translation", "translated_content": None}
 
 #     print('Translation request successful, processing translation...')
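This hunk makes the same fix as the previous one, appending response.text so failed requests log the body as well as the status code. An alternative that surfaces the same information without the print-and-return pattern, sketched with an illustrative helper (the name and the RuntimeError choice are not from the diff):

import requests

def post_json(url, payload, headers):
    """POST and return parsed JSON, raising with status and body on failure."""
    resp = requests.post(url, json=payload, headers=headers, timeout=30)
    if resp.status_code != 200:
        raise RuntimeError(f"HTTP {resp.status_code}: {resp.text}")
    return resp.json()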
@@ -369,6 +105,12 @@ if __name__ == "__main__":
 #     model=Groq(id="llama3-70b-8192", api_key=api_key),
 #     markdown=True
 # )
+# # Set up Jinja2 environment
+# proj_dir = Path(__file__).parent
+# env = Environment(loader=FileSystemLoader(proj_dir / 'templates'))
+
+
+# template_html = env.get_template('template_html.j2')
 
 # # Response Generation Function
 # def retrieve_and_generate_response(query, cross_encoder_choice, history=None):
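The Jinja2 setup added here references Path, but no pathlib import appears among the additions in this diff, so the block presumably relies on an import elsewhere, or would fail with NameError if uncommented as-is. A self-contained version of the same setup (the templates/template_html.j2 path is taken from the diff; the render variables are illustrative):

from pathlib import Path
from jinja2 import Environment, FileSystemLoader

proj_dir = Path(__file__).parent
env = Environment(loader=FileSystemLoader(proj_dir / "templates"))
template_html = env.get_template("template_html.j2")

# Rendering step (context variables here are illustrative, not from the diff):
html = template_html.render(documents=["first passage", "second passage"])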
@@ -534,213 +276,3 @@ if __name__ == "__main__":
 
 # if __name__ == "__main__":
 #     demo.launch(server_name="0.0.0.0", server_port=7860)# import gradio as gr
-
-
-# from phi.agent import Agent
-# from phi.model.groq import Groq
-# import os
-# import logging
-# from sentence_transformers import CrossEncoder
-# from backend.semantic_search import table, retriever
-# import numpy as np
-# from time import perf_counter
-
-# # Set up logging
-# logging.basicConfig(level=logging.INFO)
-# logger = logging.getLogger(__name__)
-
-# # API Key setup
-# api_key = os.getenv("GROQ_API_KEY")
-# if not api_key:
-#     gr.Warning("GROQ_API_KEY not found. Set it in 'Repository secrets'.")
-#     logger.error("GROQ_API_KEY not found.")
-#     api_key = ""  # Fallback to empty string, but this will fail without a key
-# else:
-#     os.environ["GROQ_API_KEY"] = api_key
-
-# # Initialize PhiData Agent
-# agent = Agent(
-#     name="Science Education Assistant",
-#     role="You are a helpful science tutor for 10th-grade students",
-#     instructions=[
-#         "You are an expert science teacher specializing in 10th-grade curriculum.",
-#         "Provide clear, accurate, and age-appropriate explanations.",
-#         "Use simple language and examples that students can understand.",
-#         "Focus on concepts from physics, chemistry, and biology.",
-#         "Structure responses with headings and bullet points when helpful.",
-#         "Encourage learning and curiosity."
-#     ],
-#     model=Groq(id="llama3-70b-8192", api_key=api_key),
-#     markdown=True
-# )
-
-# # Response Generation Function
-# def retrieve_and_generate_response(query, cross_encoder_choice, history=None):
-#     """Generate response using semantic search and LLM"""
-#     top_rerank = 25
-#     top_k_rank = 20
-
-#     if not query.strip():
-#         return "Please provide a valid question."
-
-#     try:
-#         start_time = perf_counter()
-
-#         # Encode query and search documents
-#         query_vec = retriever.encode(query)
-#         documents = table.search(query_vec, vector_column_name="vector").limit(top_rerank).to_list()
-#         documents = [doc["text"] for doc in documents]
-
-#         # Re-rank documents using cross-encoder
-#         cross_encoder_model = CrossEncoder('BAAI/bge-reranker-base') if cross_encoder_choice == '(ACCURATE) BGE reranker' else CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
-#         query_doc_pair = [[query, doc] for doc in documents]
-#         cross_scores = cross_encoder_model.predict(query_doc_pair)
-#         sim_scores_argsort = list(reversed(np.argsort(cross_scores)))
-#         documents = [documents[idx] for idx in sim_scores_argsort[:top_k_rank]]
-
-#         # Create context from top documents
-#         context = "\n\n".join(documents[:10]) if documents else ""
-#         context = f"Context information from educational materials:\n{context}\n\n"
-
-#         # Add conversation history for context
-#         history_context = ""
-#         if history and len(history) > 0:
-#             for user_msg, bot_msg in history[-2:]:  # Last 2 exchanges
-#                 if user_msg and bot_msg:
-#                     history_context += f"Previous Q: {user_msg}\nPrevious A: {bot_msg}\n"
-
-#         # Create full prompt
-#         full_prompt = f"{history_context}{context}Question: {query}\n\nPlease answer the question using the context provided above. If the context doesn't contain relevant information, use your general knowledge about 10th-grade science topics."
-
-#         # Generate response
-#         response = agent.run(full_prompt)
-#         response_text = response.content if hasattr(response, 'content') else str(response)
-
-#         logger.info(f"Response generation took {perf_counter() - start_time:.2f} seconds")
-#         return response_text
-
-#     except Exception as e:
-#         logger.error(f"Error in response generation: {e}")
-#         return f"Error generating response: {str(e)}"
-
-# def simple_chat_function(message, history, cross_encoder_choice):
-#     """Chat function with semantic search and retriever integration"""
-#     if not message.strip():
-#         return "", history
-
-#     # Generate response using the semantic search function
-#     response = retrieve_and_generate_response(message, cross_encoder_choice, history)
-
-#     # Add to history
-#     history.append([message, response])
-
-#     return "", history
-
-# # Minimal working interface
-# with gr.Blocks(title="Science Chatbot") as demo:
-#     # Cross-encoder selection
-#     cross_encoder = gr.Radio(
-#         choices=['(FAST) MiniLM-L6v2', '(ACCURATE) BGE reranker'],
-#         value='(ACCURATE) BGE reranker',
-#         label="Embeddings Model",
-#         info="Select the model for document ranking"
-#     )
-
-#     chatbot = gr.Chatbot(label="Science Tutor Conversation")
-#     msg = gr.Textbox(placeholder="Type your message here...")
-#     clear = gr.Button("Clear")
-
-#     msg.submit(simple_chat_function, [msg, chatbot, cross_encoder], [msg, chatbot])
-#     clear.click(lambda: ([], ""), outputs=[chatbot, msg])
-
-# if __name__ == "__main__":
-#     demo.launch()# import gradio as gr
-
-
-
-
-# from phi.agent import Agent
-# from phi.model.groq import Groq
-# import os
-# import logging
-# from sentence_transformers import SentenceTransformer
-# from typing import List
-
-# # Set up logging
-# logging.basicConfig(level=logging.INFO)
-# logger = logging.getLogger(__name__)
-
-# # API Key setup
-# api_key = os.getenv("GROQ_API_KEY")
-# if not api_key:
-#     gr.Warning("GROQ_API_KEY not found. Set it in 'Repository secrets'.")
-#     logger.error("GROQ_API_KEY not found.")
-#     api_key = ""  # Fallback to empty string, but this will fail without a key
-# else:
-#     os.environ["GROQ_API_KEY"] = api_key
-
-# # Initialize PhiData Agent
-# agent = Agent(
-#     model=Groq(model="llama3-70b-8192", api_key=api_key),
-#     instructions=[
-#         "You are a helpful assistant designed to answer questions on various topics.",
-#         "Use the provided context from retrieved documents to answer questions.",
-#         "If you don't have enough information, say 'I don’t have enough information to answer that.'"
-#     ],
-#     markdown=True
-# )
-
-# # Load a simple embedding model
-# embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
-
-# # Simulated document corpus
-# documents = [
-#     "The capital of France is Paris.",
-#     "Python is a popular programming language.",
-#     "Semantic search uses embeddings to find relevant documents.",
-#     "The Eiffel Tower is located in Paris."
-# ]
-
-# # Convert documents to embeddings and store them
-# document_embeddings = embedding_model.encode(documents, convert_to_tensor=True)
-# import numpy as np
-# def retrieve_documents(query: str, k: int = 2) -> List[str]:
-#     """Simple retriever using cosine similarity."""
-#     query_embedding = embedding_model.encode(query, convert_to_tensor=True)
-#     similarities = np.dot(document_embeddings, query_embedding.T).cpu().numpy()
-#     top_k_indices = similarities.argsort()[-k:][::-1]
-#     return [documents[i] for i in top_k_indices]
-
-# def simple_chat_function(message, history):
-#     """Chat function with semantic search and retriever integration"""
-#     if not message.strip():
-#         return "", history
-
-#     # Retrieve relevant documents
-#     context = retrieve_documents(message)
-#     context_text = "\n".join(context) if context else "No relevant context found."
-
-#     # Generate response using PhiData agent with context
-#     try:
-#         response = agent.run(f"Context: {context_text}\n\nQuestion: {message}")
-#         response_text = response.content if hasattr(response, 'content') else "Error generating response."
-#     except Exception as e:
-#         logger.error(f"Agent error: {e}")
-#         response_text = "Sorry, there was an error processing your request."
-
-#     # Add to history
-#     history.append([message, response_text])
-
-#     return "", history
-
-# # Minimal working interface
-# with gr.Blocks() as demo:
-#     chatbot = gr.Chatbot()
-#     msg = gr.Textbox(placeholder="Type your message here...")
-#     clear = gr.Button("Clear")
-
-#     msg.submit(simple_chat_function, [msg, chatbot], [msg, chatbot])
-#     clear.click(lambda: ([], ""), outputs=[chatbot, msg])
-
-# if __name__ == "__main__":
-#     demo.launch()
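For reference, both deleted versions of retrieve_and_generate_response share one core step: over-fetch candidates from the vector table, score (query, document) pairs with a cross-encoder, and keep the top-ranked documents. That step in isolation, as a sketch (the model names come from the deleted code; the candidate list stands in for the table.search(...) results):

import numpy as np
from sentence_transformers import CrossEncoder

def rerank(query, documents, top_k=20):
    """Score (query, doc) pairs with a cross-encoder and keep the top_k docs."""
    model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")  # or 'BAAI/bge-reranker-base'
    scores = model.predict([[query, doc] for doc in documents])
    order = np.argsort(scores)[::-1]  # highest score first
    return [documents[i] for i in order[:top_k]]

# Illustrative candidates standing in for table.search(...) results:
candidates = ["Metals conduct electricity.", "Photosynthesis makes glucose in leaves."]
print(rerank("What is photosynthesis?", candidates, top_k=1))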