Spaces:
Sleeping
Sleeping
Update rag.py
Browse files
rag.py
CHANGED
|
@@ -1,13 +1,20 @@
|
|
| 1 |
import json
|
| 2 |
-
|
| 3 |
-
from groq import Groq
|
| 4 |
-
from datetime import datetime
|
| 5 |
import os
|
| 6 |
import pandas as pd
|
| 7 |
-
from
|
| 8 |
from dotenv import load_dotenv
|
| 9 |
-
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
# Load environment variables
|
| 13 |
load_dotenv()
|
|
@@ -15,31 +22,17 @@ load_dotenv()
|
|
| 15 |
# Initialize Groq client
|
| 16 |
groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))
|
| 17 |
|
| 18 |
-
# Load
|
| 19 |
similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
|
| 20 |
|
| 21 |
# Config
|
| 22 |
HF_DATASET_REPO = "midrees2806/unmatched_queries"
|
| 23 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 24 |
|
| 25 |
-
#
|
| 26 |
-
GREETINGS = [
|
| 27 |
-
"hi", "hello", "hey", "good morning", "good afternoon", "good evening",
|
| 28 |
-
"assalam o alaikum", "salam", "aoa", "hi there",
|
| 29 |
-
"hey there", "greetings"
|
| 30 |
-
]
|
| 31 |
-
|
| 32 |
-
# Fixed rephrased unmatched query responses
|
| 33 |
-
UNMATCHED_RESPONSES = [
|
| 34 |
-
"Thank you for your query. Weโve forwarded it to our support team and it will be added soon. In the meantime, you can visit the University of Education official website or reach out via the contact details below.\n\n๐ +92-42-99262231-33\nโ๏ธ info@ue.edu.pk\n๐ https://ue.edu.pk",
|
| 35 |
-
"Weโve noted your question and itโs in queue for inclusion. For now, please check the University of Education website or contact the administration directly.\n\n๐ +92-42-99262231-33\nโ๏ธ info@ue.edu.pk\n๐ https://ue.edu.pk",
|
| 36 |
-
"Your query has been recorded. Weโll update the system with relevant information shortly. Meanwhile, you can visit UE's official site or reach out using the details below:\n\n๐ +92-42-99262231-33\nโ๏ธ info@ue.edu.pk\n๐ https://ue.edu.pk",
|
| 37 |
-
"We appreciate your question. It has been forwarded for further processing. Until itโs available here, feel free to visit the official UE website or use the contact options:\n\n๐ +92-42-99262231-33\nโ๏ธ info@ue.edu.pk\n๐ https://ue.edu.pk"
|
| 38 |
-
]
|
| 39 |
-
|
| 40 |
-
# Load multiple JSON datasets
|
| 41 |
dataset = []
|
| 42 |
try:
|
|
|
|
| 43 |
json_files = glob.glob('datasets/*.json')
|
| 44 |
for file_path in json_files:
|
| 45 |
with open(file_path, 'r', encoding='utf-8') as f:
|
|
@@ -48,17 +41,20 @@ try:
|
|
| 48 |
for item in data:
|
| 49 |
if isinstance(item, dict) and 'Question' in item and 'Answer' in item:
|
| 50 |
dataset.append(item)
|
| 51 |
-
else:
|
| 52 |
-
print(f"Invalid entry in {file_path}: {item}")
|
| 53 |
else:
|
| 54 |
-
print(f"
|
| 55 |
except Exception as e:
|
| 56 |
print(f"Error loading datasets: {e}")
|
| 57 |
|
| 58 |
# Precompute embeddings
|
| 59 |
dataset_questions = [item.get("Question", "").lower().strip() for item in dataset]
|
| 60 |
dataset_answers = [item.get("Answer", "") for item in dataset]
|
| 61 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
|
| 63 |
# Save unmatched queries to Hugging Face
|
| 64 |
def manage_unmatched_queries(query: str):
|
|
@@ -69,6 +65,7 @@ def manage_unmatched_queries(query: str):
|
|
| 69 |
df = ds["train"].to_pandas()
|
| 70 |
except:
|
| 71 |
df = pd.DataFrame(columns=["Query", "Timestamp", "Processed"])
|
|
|
|
| 72 |
if query not in df["Query"].values:
|
| 73 |
new_entry = {"Query": query, "Timestamp": timestamp, "Processed": False}
|
| 74 |
df = pd.concat([df, pd.DataFrame([new_entry])], ignore_index=True)
|
|
@@ -94,23 +91,25 @@ def query_groq_llm(prompt, model_name="llama3-70b-8192"):
|
|
| 94 |
print(f"Error querying Groq API: {e}")
|
| 95 |
return ""
|
| 96 |
|
| 97 |
-
# Main logic function
|
| 98 |
def get_best_answer(user_input):
|
| 99 |
if not user_input.strip():
|
| 100 |
return "Please enter a valid question."
|
| 101 |
-
|
| 102 |
user_input_lower = user_input.lower().strip()
|
| 103 |
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
if any(keyword in user_input_lower for keyword in ["fee structure", "fees structure", "semester fees", "semester fee"]):
|
| 108 |
return (
|
| 109 |
"๐ฐ For complete and up-to-date fee details for this program, we recommend visiting the official University of Education fee structure page.\n"
|
| 110 |
-
"You
|
| 111 |
"๐ https://ue.edu.pk/allfeestructure.php"
|
| 112 |
)
|
| 113 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 114 |
user_embedding = similarity_model.encode(user_input_lower, convert_to_tensor=True)
|
| 115 |
similarities = util.pytorch_cos_sim(user_embedding, dataset_embeddings)[0]
|
| 116 |
best_match_idx = similarities.argmax().item()
|
|
@@ -118,26 +117,33 @@ def get_best_answer(user_input):
|
|
| 118 |
|
| 119 |
if best_score < 0.65:
|
| 120 |
manage_unmatched_queries(user_input)
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
"""
|
| 134 |
|
| 135 |
llm_response = query_groq_llm(prompt)
|
| 136 |
|
| 137 |
if llm_response:
|
| 138 |
-
for marker in ["Improved Answer:", "Official Answer:"
|
| 139 |
if marker in llm_response:
|
| 140 |
-
|
| 141 |
-
|
|
|
|
|
|
|
| 142 |
else:
|
| 143 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import json
|
| 2 |
+
import glob
|
|
|
|
|
|
|
| 3 |
import os
|
| 4 |
import pandas as pd
|
| 5 |
+
from datetime import datetime
|
| 6 |
from dotenv import load_dotenv
|
| 7 |
+
|
| 8 |
+
# AI and Data Libraries
|
| 9 |
+
from sentence_transformers import SentenceTransformer, util
|
| 10 |
+
from groq import Groq
|
| 11 |
+
from datasets import load_dataset, Dataset
|
| 12 |
+
|
| 13 |
+
# Image and Utility Libraries
|
| 14 |
+
import requests
|
| 15 |
+
from io import BytesIO
|
| 16 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 17 |
+
import numpy as np
|
| 18 |
|
| 19 |
# Load environment variables
load_dotenv()

# Initialize Groq client
groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))

# Load the sentence-similarity model used to match user questions
# against the local Q/A dataset.
similarity_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

# Config
HF_DATASET_REPO = "midrees2806/unmatched_queries"
HF_TOKEN = os.getenv("HF_TOKEN")

# Load multiple JSON datasets from the 'datasets' folder.
# Each file is expected to hold a list of {"Question": ..., "Answer": ...}
# dicts; anything else is reported and skipped.
dataset = []
for file_path in glob.glob('datasets/*.json'):
    # Per-file try/except so one malformed file no longer aborts the loading
    # of every file after it (previously a single try wrapped the whole loop).
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            # NOTE(review): the json.load / isinstance lines were cut from the
            # diff view and are reconstructed from the surrounding code —
            # confirm against the original file.
            data = json.load(f)
        if isinstance(data, list):
            for item in data:
                # Keep only well-formed Q/A entries; others are silently dropped.
                if isinstance(item, dict) and 'Question' in item and 'Answer' in item:
                    dataset.append(item)
        else:
            print(f"Skipping {file_path}: File does not contain a list.")
    except Exception as e:
        print(f"Error loading datasets: {e}")

# Precompute embeddings for every known question (lower-cased and stripped,
# mirroring how user input is normalised in get_best_answer).
dataset_questions = [item.get("Question", "").lower().strip() for item in dataset]
dataset_answers = [item.get("Answer", "") for item in dataset]

if dataset_questions:
    dataset_embeddings = similarity_model.encode(dataset_questions, convert_to_tensor=True)
else:
    # Sentinel checked by get_best_answer before any similarity search.
    dataset_embeddings = None
    print("Warning: No data found in the datasets folder.")
|
| 58 |
|
| 59 |
# Save unmatched queries to Hugging Face
|
| 60 |
def manage_unmatched_queries(query: str):
|
|
|
|
| 65 |
df = ds["train"].to_pandas()
|
| 66 |
except:
|
| 67 |
df = pd.DataFrame(columns=["Query", "Timestamp", "Processed"])
|
| 68 |
+
|
| 69 |
if query not in df["Query"].values:
|
| 70 |
new_entry = {"Query": query, "Timestamp": timestamp, "Processed": False}
|
| 71 |
df = pd.concat([df, pd.DataFrame([new_entry])], ignore_index=True)
|
|
|
|
| 91 |
print(f"Error querying Groq API: {e}")
|
| 92 |
return ""
|
| 93 |
|
| 94 |
+
# Main logic function
def get_best_answer(user_input):
    """Return an answer for *user_input* about University of Education Lahore.

    Flow:
      1. Blank input -> validation message.
      2. Fee-related question -> fixed pointer to the official fee page.
      3. Otherwise -> cosine similarity against the precomputed dataset
         embeddings; a Groq LLM polishes the matched answer (score >= 0.65)
         or drafts one from scratch, and low-confidence queries are logged
         via manage_unmatched_queries.
    """
    if not user_input.strip():
        return "Please enter a valid question."

    user_input_lower = user_input.lower().strip()

    # 📌 Check if question is about fees. Whole-word matching so that e.g.
    # "coffee" or "feedback" no longer trips the bare substring "fee".
    import re
    if re.search(r"\b(?:fees|fee|charges|semester fee)\b", user_input_lower):
        return (
            "💰 For complete and up-to-date fee details for this program, we recommend visiting the official University of Education fee structure page.\n"
            "You’ll find comprehensive information regarding tuition, admission charges, and other applicable fees there.\n"
            "🔗 https://ue.edu.pk/allfeestructure.php"
        )

    # 🔍 Continue with normal similarity-based logic.
    if dataset_embeddings is None:
        return "I am currently updating my database. Please try again in a moment."

    user_embedding = similarity_model.encode(user_input_lower, convert_to_tensor=True)
    similarities = util.pytorch_cos_sim(user_embedding, dataset_embeddings)[0]
    best_match_idx = similarities.argmax().item()
    # NOTE(review): this assignment was cut from the diff view; reconstructed
    # from its uses below — confirm against the original file.
    best_score = similarities[best_match_idx].item()

    if best_score >= 0.65:
        # Confident match: ask the LLM to polish the stored answer.
        original_answer = dataset_answers[best_match_idx]
        prompt = f"""As an official assistant for University of Education Lahore, provide a clear response:
Question: {user_input}
Original Answer: {original_answer}
Improved Answer:"""
    else:
        # Low-confidence match: record the query for later dataset curation,
        # then let the LLM answer unaided.
        manage_unmatched_queries(user_input)
        prompt = f"""As an official assistant for University of Education Lahore, provide a helpful response:
Include relevant details about university policies.
If unsure, direct to official channels.
Question: {user_input}
Official Answer:"""

    llm_response = query_groq_llm(prompt)

    if llm_response:
        # Strip the prompt scaffold if the model echoed it back.
        for marker in ("Improved Answer:", "Official Answer:"):
            if marker in llm_response:
                response = llm_response.split(marker)[-1].strip()
                break
        else:
            response = llm_response
    else:
        # LLM unavailable: fall back to the raw dataset answer, or to the
        # official contact details when there was no confident match.
        response = dataset_answers[best_match_idx] if best_score >= 0.65 else """For official information:
📞 +92-42-99262231-33
✉️ info@ue.edu.pk
🌐 ue.edu.pk"""

    return response
|