Spaces:
Sleeping
Sleeping
Update rag.py
Browse files
rag.py
CHANGED
|
@@ -1,286 +1,138 @@
|
|
| 1 |
import json
|
| 2 |
-
|
| 3 |
-
from sentence_transformers import SentenceTransformer, util
|
| 4 |
-
|
| 5 |
-
from groq import Groq
|
| 6 |
-
|
| 7 |
-
from datetime import datetime
|
| 8 |
-
|
| 9 |
import os
|
| 10 |
-
|
| 11 |
import pandas as pd
|
| 12 |
-
|
| 13 |
-
from datasets import load_dataset, Dataset
|
| 14 |
-
|
| 15 |
from dotenv import load_dotenv
|
| 16 |
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
import
|
| 20 |
-
|
| 21 |
-
|
| 22 |
|
| 23 |
# Load environment variables
|
| 24 |
-
|
| 25 |
load_dotenv()
|
| 26 |
|
| 27 |
-
|
| 28 |
-
|
| 29 |
# Initialize Groq client
|
| 30 |
-
|
| 31 |
groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))
|
| 32 |
|
| 33 |
-
|
| 34 |
-
|
| 35 |
# Load similarity model
|
| 36 |
-
|
| 37 |
similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
|
| 38 |
|
| 39 |
-
|
| 40 |
-
|
| 41 |
# Config
|
| 42 |
-
|
| 43 |
HF_DATASET_REPO = "midrees2806/unmatched_queries"
|
| 44 |
-
|
| 45 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 46 |
|
| 47 |
-
|
| 48 |
-
|
| 49 |
# Greeting list
|
| 50 |
-
|
| 51 |
GREETINGS = [
|
| 52 |
-
|
| 53 |
"hi", "hello", "hey", "good morning", "good afternoon", "good evening",
|
| 54 |
-
|
| 55 |
-
"assalam o alaikum", "salam", "aoa", "hi there",
|
| 56 |
-
|
| 57 |
-
"hey there", "greetings"
|
| 58 |
-
|
| 59 |
]
|
| 60 |
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
# Fixed rephrased unmatched query responses
|
| 64 |
-
|
| 65 |
-
UNMATCHED_RESPONSES = [
|
| 66 |
-
|
| 67 |
-
"Thank you for your query. We’ve forwarded it to our support team and it will be added soon. In the meantime, you can visit the University of Education official website or reach out via the contact details below.\n\n📞 +92-42-99262231-33\n✉️ info@ue.edu.pk\n🌐 https://ue.edu.pk",
|
| 68 |
-
|
| 69 |
-
"We’ve noted your question and it’s in queue for inclusion. For now, please check the University of Education website or contact the administration directly.\n\n📞 +92-42-99262231-33\n✉️ info@ue.edu.pk\n🌐 https://ue.edu.pk",
|
| 70 |
-
|
| 71 |
-
"Your query has been recorded. We’ll update the system with relevant information shortly. Meanwhile, you can visit UE's official site or reach out using the details below:\n\n📞 +92-42-99262231-33\n✉️ info@ue.edu.pk\n🌐 https://ue.edu.pk",
|
| 72 |
-
|
| 73 |
-
"We appreciate your question. It has been forwarded for further processing. Until it’s available here, feel free to visit the official UE website or use the contact options:\n\n📞 +92-42-99262231-33\n✉️ info@ue.edu.pk\n🌐 https://ue.edu.pk"
|
| 74 |
-
|
| 75 |
-
]
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
# Load multiple JSON datasets
|
| 80 |
-
|
| 81 |
dataset = []
|
| 82 |
-
|
| 83 |
try:
|
| 84 |
-
|
| 85 |
json_files = glob.glob('datasets/*.json')
|
| 86 |
-
|
| 87 |
for file_path in json_files:
|
| 88 |
-
|
| 89 |
with open(file_path, 'r', encoding='utf-8') as f:
|
| 90 |
-
|
| 91 |
data = json.load(f)
|
| 92 |
-
|
| 93 |
if isinstance(data, list):
|
| 94 |
-
|
| 95 |
for item in data:
|
| 96 |
-
|
| 97 |
if isinstance(item, dict) and 'Question' in item and 'Answer' in item:
|
| 98 |
-
|
| 99 |
dataset.append(item)
|
| 100 |
-
|
| 101 |
-
else:
|
| 102 |
-
|
| 103 |
-
print(f"Invalid entry in {file_path}: {item}")
|
| 104 |
-
|
| 105 |
-
else:
|
| 106 |
-
|
| 107 |
-
print(f"File {file_path} does not contain a list.")
|
| 108 |
-
|
| 109 |
except Exception as e:
|
| 110 |
-
|
| 111 |
print(f"Error loading datasets: {e}")
|
| 112 |
|
| 113 |
-
|
| 114 |
-
|
| 115 |
# Precompute embeddings
|
| 116 |
-
|
| 117 |
dataset_questions = [item.get("Question", "").lower().strip() for item in dataset]
|
| 118 |
-
|
| 119 |
dataset_answers = [item.get("Answer", "") for item in dataset]
|
| 120 |
-
|
| 121 |
dataset_embeddings = similarity_model.encode(dataset_questions, convert_to_tensor=True)
|
| 122 |
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
# Save unmatched queries to Hugging Face
|
| 126 |
-
|
| 127 |
def manage_unmatched_queries(query: str):
|
| 128 |
-
|
| 129 |
try:
|
| 130 |
-
|
| 131 |
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 132 |
-
|
| 133 |
try:
|
| 134 |
-
|
| 135 |
ds = load_dataset(HF_DATASET_REPO, token=HF_TOKEN)
|
| 136 |
-
|
| 137 |
df = ds["train"].to_pandas()
|
| 138 |
-
|
| 139 |
except:
|
| 140 |
-
|
| 141 |
df = pd.DataFrame(columns=["Query", "Timestamp", "Processed"])
|
| 142 |
-
|
| 143 |
if query not in df["Query"].values:
|
| 144 |
-
|
| 145 |
new_entry = {"Query": query, "Timestamp": timestamp, "Processed": False}
|
| 146 |
-
|
| 147 |
df = pd.concat([df, pd.DataFrame([new_entry])], ignore_index=True)
|
| 148 |
-
|
| 149 |
updated_ds = Dataset.from_pandas(df)
|
| 150 |
-
|
| 151 |
updated_ds.push_to_hub(HF_DATASET_REPO, token=HF_TOKEN)
|
| 152 |
-
|
| 153 |
except Exception as e:
|
| 154 |
-
|
| 155 |
print(f"Failed to save query: {e}")
|
| 156 |
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
# Query Groq LLM
|
| 160 |
-
|
| 161 |
-
def query_groq_llm(prompt, model_name="llama3-70b-8192"):
|
| 162 |
-
|
| 163 |
try:
|
| 164 |
-
|
| 165 |
chat_completion = groq_client.chat.completions.create(
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
"role": "user",
|
| 170 |
-
|
| 171 |
-
"content": prompt
|
| 172 |
-
|
| 173 |
-
}],
|
| 174 |
-
|
| 175 |
-
model=model_name,
|
| 176 |
-
|
| 177 |
temperature=0.7,
|
| 178 |
-
|
| 179 |
-
max_tokens=500
|
| 180 |
-
|
| 181 |
)
|
| 182 |
-
|
| 183 |
return chat_completion.choices[0].message.content.strip()
|
| 184 |
-
|
| 185 |
except Exception as e:
|
| 186 |
-
|
| 187 |
print(f"Error querying Groq API: {e}")
|
| 188 |
-
|
| 189 |
return ""
|
| 190 |
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
# Main logic function to be called from Gradio
|
| 194 |
-
|
| 195 |
def get_best_answer(user_input):
|
| 196 |
-
|
| 197 |
if not user_input.strip():
|
| 198 |
-
|
| 199 |
return "Please enter a valid question."
|
| 200 |
|
| 201 |
-
|
| 202 |
-
|
| 203 |
user_input_lower = user_input.lower().strip()
|
| 204 |
|
| 205 |
-
|
| 206 |
-
|
| 207 |
if len(user_input_lower.split()) < 3 and not any(greet in user_input_lower for greet in GREETINGS):
|
| 208 |
-
|
| 209 |
return "Please ask your question properly with at least 3 words."
|
| 210 |
|
| 211 |
-
|
| 212 |
-
|
| 213 |
if any(keyword in user_input_lower for keyword in ["fee structure", "fees structure", "semester fees", "semester fee"]):
|
| 214 |
-
|
| 215 |
return (
|
| 216 |
-
|
| 217 |
"💰 For complete and up-to-date fee details for this program, we recommend visiting the official University of Education fee structure page.\n"
|
| 218 |
-
|
| 219 |
-
"You'll find comprehensive information regarding tuition, admission charges, and other applicable fees there.\n"
|
| 220 |
-
|
| 221 |
"🔗 https://ue.edu.pk/allfeestructure.php"
|
| 222 |
-
|
| 223 |
)
|
| 224 |
|
| 225 |
-
|
| 226 |
-
|
| 227 |
user_embedding = similarity_model.encode(user_input_lower, convert_to_tensor=True)
|
| 228 |
-
|
| 229 |
similarities = util.pytorch_cos_sim(user_embedding, dataset_embeddings)[0]
|
| 230 |
-
|
| 231 |
best_match_idx = similarities.argmax().item()
|
| 232 |
-
|
| 233 |
best_score = similarities[best_match_idx].item()
|
| 234 |
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 239 |
manage_unmatched_queries(user_input)
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
Use structured formatting (like headings, bullet points, or numbered lists) where appropriate.
|
| 252 |
-
|
| 253 |
-
DO NOT add any new or extra information. ONLY rephrase and improve the clarity and formatting of the original answer.
|
| 254 |
-
|
| 255 |
-
### Question:
|
| 256 |
-
|
| 257 |
-
{user_input}
|
| 258 |
-
|
| 259 |
-
### Original Answer:
|
| 260 |
-
|
| 261 |
-
{original_answer}
|
| 262 |
-
|
| 263 |
-
### Rephrased Answer:
|
| 264 |
-
|
| 265 |
-
"""
|
| 266 |
-
|
| 267 |
-
|
| 268 |
|
| 269 |
llm_response = query_groq_llm(prompt)
|
| 270 |
|
| 271 |
-
|
| 272 |
-
|
| 273 |
if llm_response:
|
| 274 |
-
|
| 275 |
for marker in ["Improved Answer:", "Official Answer:", "Rephrased Answer:"]:
|
| 276 |
-
|
| 277 |
if marker in llm_response:
|
| 278 |
-
|
| 279 |
return llm_response.split(marker)[-1].strip()
|
| 280 |
-
|
| 281 |
return llm_response
|
| 282 |
-
|
| 283 |
else:
|
| 284 |
-
|
| 285 |
-
return dataset_answers[best_match_idx]
|
| 286 |
-
|
|
|
|
| 1 |
import json
|
| 2 |
+
import glob
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
import os
|
| 4 |
+
import random
|
| 5 |
import pandas as pd
|
| 6 |
+
from datetime import datetime
|
|
|
|
|
|
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
|
| 9 |
+
# Core AI Libraries
|
| 10 |
+
from sentence_transformers import SentenceTransformer, util
|
| 11 |
+
from groq import Groq
|
| 12 |
+
from datasets import load_dataset, Dataset
|
|
|
|
| 13 |
|
| 14 |
# --- Environment & client setup -------------------------------------------
# Pull GROQ_API_KEY / HF_TOKEN from a local .env file into the process env.
load_dotenv()

# Groq client used for all LLM completions in this module.
groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))

# Sentence-embedding model used to match user questions against the
# precomputed dataset questions (cosine similarity).
similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

# --- Configuration ---------------------------------------------------------
# Hugging Face dataset repo where unmatched user queries are logged.
HF_DATASET_REPO = "midrees2806/unmatched_queries"
HF_TOKEN = os.getenv("HF_TOKEN")

# Greetings that exempt a short input from the minimum-length check.
GREETINGS = [
    "hi", "hello", "hey", "good morning", "good afternoon", "good evening",
    "assalam o alaikum", "salam", "aoa", "hi there", "hey there", "greetings",
]
|
| 32 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
# --- Load the QA corpus ----------------------------------------------------
# Merge every JSON file under datasets/ into one list. Each usable entry is
# a dict carrying both a 'Question' and an 'Answer' key; anything else is
# silently skipped. Loading is best-effort: on any error the bot still
# starts, just with an empty (or partial) corpus.
dataset = []
try:
    for file_path in glob.glob('datasets/*.json'):
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        if isinstance(data, list):
            dataset.extend(
                item for item in data
                if isinstance(item, dict) and 'Question' in item and 'Answer' in item
            )
except Exception as e:
    print(f"Error loading datasets: {e}")
|
| 46 |
|
|
|
|
|
|
|
| 47 |
# --- Precompute question embeddings ---------------------------------------
# Normalise each stored question (lowercase, stripped) and encode the whole
# corpus once at startup, so answering a query needs only a single encode
# plus one cosine-similarity pass.
_qa_pairs = [
    (entry.get("Question", "").lower().strip(), entry.get("Answer", ""))
    for entry in dataset
]
dataset_questions = [question for question, _ in _qa_pairs]
dataset_answers = [answer for _, answer in _qa_pairs]
dataset_embeddings = similarity_model.encode(dataset_questions, convert_to_tensor=True)
|
| 51 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
def manage_unmatched_queries(query: str) -> None:
    """Log an unmatched user query to the Hugging Face dataset repo.

    Appends ``query`` (with a timestamp and ``Processed=False``) to the
    'train' split of ``HF_DATASET_REPO``, skipping exact duplicates.
    Best-effort: every failure is printed and swallowed so the chat flow
    is never interrupted by logging problems.

    Args:
        query: The raw user input that had no dataset match.
    """
    try:
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        try:
            ds = load_dataset(HF_DATASET_REPO, token=HF_TOKEN)
            df = ds["train"].to_pandas()
        except Exception:
            # Fix: was a bare `except:` which also traps SystemExit /
            # KeyboardInterrupt. Repo missing, empty, or unreachable ->
            # start a fresh log frame.
            df = pd.DataFrame(columns=["Query", "Timestamp", "Processed"])

        if query not in df["Query"].values:
            new_entry = {"Query": query, "Timestamp": timestamp, "Processed": False}
            df = pd.concat([df, pd.DataFrame([new_entry])], ignore_index=True)
            updated_ds = Dataset.from_pandas(df)
            updated_ds.push_to_hub(HF_DATASET_REPO, token=HF_TOKEN)
    except Exception as e:
        print(f"Failed to save query: {e}")
|
| 68 |
|
| 69 |
+
def query_groq_llm(prompt, model_name="llama3-70b-8192"):
    """Send a single-turn user prompt to the Groq chat API.

    Args:
        prompt: The full prompt text to send as one user message.
        model_name: Groq model identifier. Defaults to the model this
            module has always used; exposed as a parameter (backward
            compatible) so callers can switch models without editing
            this function.

    Returns:
        The stripped completion text, or "" if the API call fails for
        any reason (the error is printed, never raised).
    """
    try:
        chat_completion = groq_client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model=model_name,
            temperature=0.7,
            max_tokens=600,
        )
        return chat_completion.choices[0].message.content.strip()
    except Exception as e:
        print(f"Error querying Groq API: {e}")
        return ""
|
| 81 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
def get_best_answer(user_input):
    """Answer a user question for the UOE chatbot (Gradio entry point).

    Pipeline: validate input -> short-circuit fee queries with the official
    link -> find the closest corpus question by embedding similarity ->
    either rephrase the matched answer via the LLM (score >= threshold) or
    log the query as unmatched and let the LLM answer from general
    knowledge. Falls back to the raw dataset answer (or a contact note)
    when the LLM returns nothing.

    Args:
        user_input: Raw text from the user.

    Returns:
        A response string; never raises to the UI layer.
    """
    # Single source of truth for the match cutoff (was duplicated as a
    # magic 0.65 in two places).
    SIMILARITY_THRESHOLD = 0.65

    if not user_input.strip():
        return "Please enter a valid question."

    user_input_lower = user_input.lower().strip()

    # Basic Validation: very short inputs are rejected unless they greet us.
    if len(user_input_lower.split()) < 3 and not any(greet in user_input_lower for greet in GREETINGS):
        return "Please ask your question properly with at least 3 words."

    # Fee Check: fee questions always go to the official fee page.
    if any(keyword in user_input_lower for keyword in ["fee structure", "fees structure", "semester fees", "semester fee"]):
        return (
            "💰 For complete and up-to-date fee details for this program, we recommend visiting the official University of Education fee structure page.\n"
            "🔗 https://ue.edu.pk/allfeestructure.php"
        )

    # Similarity Calculation against the precomputed question embeddings.
    # Guard: with an empty corpus, argmax on an empty tensor raises — treat
    # that case as "no match" instead of crashing.
    if len(dataset_questions) == 0:
        best_match_idx, best_score = -1, 0.0
    else:
        user_embedding = similarity_model.encode(user_input_lower, convert_to_tensor=True)
        similarities = util.pytorch_cos_sim(user_embedding, dataset_embeddings)[0]
        best_match_idx = similarities.argmax().item()
        best_score = similarities[best_match_idx].item()

    if best_score >= SIMILARITY_THRESHOLD:
        # PATH 1: Dataset Match (Rephrase with LLM)
        original_answer = dataset_answers[best_match_idx]
        prompt = f"""Name is UOE AI Assistant! You are an official assistant for the University of Education Lahore.
Rephrase the following official answer clearly and professionally using bullet points or headings where needed.
DO NOT add extra information.

Question: {user_input}
Original Answer: {original_answer}
Rephrased Answer:"""
    else:
        # PATH 2: No Dataset Match (Use LLM Knowledge + Logging)
        manage_unmatched_queries(user_input)

        prompt = f"""You are the UOE AI Assistant for University of Education Lahore.
The user asked: "{user_input}".
1. Answer this question based on your general knowledge about University of Education Lahore.
2. After the answer, strictly include a note saying that this specific query has been forwarded to the support team for verification and will be added to our verified database soon.
3. Mention that for 100% confirmed information, they should visit the official website (https://ue.edu.pk) or contact:
- Phone: +92-42-99262231-33
- Email: info@ue.edu.pk

Make the response professional and formatted with headings/points."""

    llm_response = query_groq_llm(prompt)

    # Cleaning up response labels if any
    if llm_response:
        for marker in ["Improved Answer:", "Official Answer:", "Rephrased Answer:"]:
            if marker in llm_response:
                return llm_response.split(marker)[-1].strip()
        return llm_response

    # LLM failed: fall back to the raw matched answer, or a contact note.
    return dataset_answers[best_match_idx] if best_score >= SIMILARITY_THRESHOLD else "Please contact info@ue.edu.pk for assistance."
|
|
|
|
|
|