Spaces:
Sleeping
Sleeping
Update tools.py
Browse files
tools.py
CHANGED
|
@@ -1,235 +1,235 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import json
|
| 3 |
-
import datetime
|
| 4 |
-
import requests
|
| 5 |
-
import logging
|
| 6 |
-
# import gspread
|
| 7 |
-
from dotenv import load_dotenv
|
| 8 |
-
|
| 9 |
-
# Configure logging
|
| 10 |
-
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
| 11 |
-
logger = logging.getLogger(__name__)
|
| 12 |
-
# from huggingface_hub import login as hf_login
|
| 13 |
-
from langchain_community.vectorstores import FAISS
|
| 14 |
-
from langchain.embeddings.base import Embeddings
|
| 15 |
-
from sentence_transformers import SentenceTransformer
|
| 16 |
-
from langchain_tavily import TavilySearch
|
| 17 |
-
from google.adk.tools import FunctionTool
|
| 18 |
-
|
| 19 |
-
# === LOAD ENV ===
|
| 20 |
-
load_dotenv()
|
| 21 |
-
# HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
|
| 22 |
-
# GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
|
| 23 |
-
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
|
| 24 |
-
# SERVICE_ACCOUNT_JSON = os.getenv("GOOGLE_SERVICE_ACCOUNT_JSON")
|
| 25 |
-
# SHEET_KEY = os.getenv("SHEET_KEY")
|
| 26 |
-
PREDICTOR_API_URL = os.getenv("PREDICTOR_API_URL")
|
| 27 |
-
PREDICTOR_API_KEY = os.getenv("PREDICTOR_API_KEY")
|
| 28 |
-
|
| 29 |
-
# hf_login(token=HF_TOKEN)
|
| 30 |
-
|
| 31 |
-
# === GOOGLE SHEET LOGGING ===
|
| 32 |
-
# service_account_dict = json.loads(SERVICE_ACCOUNT_JSON) if isinstance(SERVICE_ACCOUNT_JSON, str) else SERVICE_ACCOUNT_JSON
|
| 33 |
-
|
| 34 |
-
# def add_query_to_sheet(user_id: str, query: str, response: str):
|
| 35 |
-
# gc = gspread.service_account_from_dict(service_account_dict)
|
| 36 |
-
# sh = gc.open_by_key(SHEET_KEY)
|
| 37 |
-
# ws = sh.worksheet("Sheet1")
|
| 38 |
-
# timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 39 |
-
# ws.append_row([user_id, timestamp, query, response])
|
| 40 |
-
|
| 41 |
-
# === VECTOR STORE ===
|
| 42 |
-
def load_vector_store(data_dir: str):
|
| 43 |
-
texts = []
|
| 44 |
-
for fname in os.listdir(data_dir):
|
| 45 |
-
if fname.lower().endswith(".md"):
|
| 46 |
-
path = os.path.join(data_dir, fname)
|
| 47 |
-
try:
|
| 48 |
-
with open(path, "r", encoding="utf-8") as f:
|
| 49 |
-
texts.append(f.read())
|
| 50 |
-
except UnicodeDecodeError:
|
| 51 |
-
with open(path, "r", encoding="latin-1") as f:
|
| 52 |
-
texts.append(f.read())
|
| 53 |
-
st_model = SentenceTransformer("all-MiniLM-L6-v2")
|
| 54 |
-
class LocalEmbeddings(Embeddings):
|
| 55 |
-
def embed_documents(self, docs):
|
| 56 |
-
return st_model.encode(docs).tolist()
|
| 57 |
-
def embed_query(self, q):
|
| 58 |
-
return st_model.encode([q])[0].tolist()
|
| 59 |
-
return FAISS.from_texts(texts, LocalEmbeddings())
|
| 60 |
-
|
| 61 |
-
vector_store = load_vector_store("College_markdown")
|
| 62 |
-
|
| 63 |
-
# === TOOL DEFINITIONS ===
|
| 64 |
-
def db_search(query: str) -> dict:
|
| 65 |
-
docs = vector_store.similarity_search(query, k=6)
|
| 66 |
-
if not docs: return {"results": []}
|
| 67 |
-
return {"results": [d.page_content for d in docs]}
|
| 68 |
-
|
| 69 |
-
def tavily_search(query: str) -> dict:
|
| 70 |
-
if not TAVILY_API_KEY:
|
| 71 |
-
return {"results": ["Tavily API key not configured"]}
|
| 72 |
-
|
| 73 |
-
tool = TavilySearch(api_key=TAVILY_API_KEY, max_results=6, topic="general", include_raw_content=True)
|
| 74 |
-
result = tool.invoke({"query": query})
|
| 75 |
-
snippets = [item.get('content') for item in result.get('results', [])]
|
| 76 |
-
return {"results": snippets or []}
|
| 77 |
-
|
| 78 |
-
def college_predictor(
|
| 79 |
-
userCrl: int,
|
| 80 |
-
userCategory: str,
|
| 81 |
-
userGender: str,
|
| 82 |
-
userHomeState: str,
|
| 83 |
-
limit: int = 4,
|
| 84 |
-
counsellingName: str = "csab",
|
| 85 |
-
collegeName: str = "national institute of technology",
|
| 86 |
-
branchName: str = "computer science and engineering"
|
| 87 |
-
) -> str:
|
| 88 |
-
# Log the function call with all parameters
|
| 89 |
-
logger.info("=" * 80)
|
| 90 |
-
logger.info("PREDICTOR API CALL STARTED")
|
| 91 |
-
logger.info("=" * 80)
|
| 92 |
-
|
| 93 |
-
logger.info("Input Parameters:")
|
| 94 |
-
logger.info(f" userCrl: {userCrl} (type: {type(userCrl)})")
|
| 95 |
-
logger.info(f" userCategory: {userCategory} (type: {type(userCategory)})")
|
| 96 |
-
logger.info(f" userGender: {userGender} (type: {type(userGender)})")
|
| 97 |
-
logger.info(f" userHomeState: {userHomeState} (type: {type(userHomeState)})")
|
| 98 |
-
logger.info(f" limit: {limit} (type: {type(limit)})")
|
| 99 |
-
logger.info(f" counsellingName: {counsellingName} (type: {type(counsellingName)})")
|
| 100 |
-
logger.info(f" collegeName: {collegeName} (type: {type(collegeName)})")
|
| 101 |
-
logger.info(f" branchName: {branchName} (type: {type(branchName)})")
|
| 102 |
-
|
| 103 |
-
headers = {
|
| 104 |
-
"Content-Type": "application/json",
|
| 105 |
-
"Authorization": f"Bearer {PREDICTOR_API_KEY}"
|
| 106 |
-
}
|
| 107 |
-
|
| 108 |
-
try:
|
| 109 |
-
# Log parameter conversion
|
| 110 |
-
logger.info("Converting parameters...")
|
| 111 |
-
converted_crl = int(userCrl)
|
| 112 |
-
logger.info(f" userCrl converted: {converted_crl} (type: {type(converted_crl)})")
|
| 113 |
-
|
| 114 |
-
params = {
|
| 115 |
-
"userCrl": converted_crl,
|
| 116 |
-
"userCategory": userCategory,
|
| 117 |
-
"userGender": userGender,
|
| 118 |
-
"userHomeState": userHomeState,
|
| 119 |
-
"limit": limit,
|
| 120 |
-
"counsellingName": counsellingName,
|
| 121 |
-
}
|
| 122 |
-
|
| 123 |
-
if collegeName:
|
| 124 |
-
params["collegeQuery"] = collegeName
|
| 125 |
-
logger.info(f" Added collegeQuery: {collegeName}")
|
| 126 |
-
if branchName:
|
| 127 |
-
params["branchQuery"] = branchName
|
| 128 |
-
logger.info(f" Added branchQuery: {branchName}")
|
| 129 |
-
|
| 130 |
-
# Log the final payload
|
| 131 |
-
logger.info("Final API Request:")
|
| 132 |
-
logger.info(f" URL: {PREDICTOR_API_URL}")
|
| 133 |
-
logger.info(f" Headers: {json.dumps(headers, indent=2)}")
|
| 134 |
-
logger.info(f" Payload: {json.dumps(params, indent=2)}")
|
| 135 |
-
|
| 136 |
-
logger.info("Making API request...")
|
| 137 |
-
response = requests.post(PREDICTOR_API_URL, json=params, headers=headers, timeout=30)
|
| 138 |
-
|
| 139 |
-
# Log response details
|
| 140 |
-
logger.info("API Response:")
|
| 141 |
-
logger.info(f" Status Code: {response.status_code}")
|
| 142 |
-
logger.info(f" Response Headers: {dict(response.headers)}")
|
| 143 |
-
logger.info(f" Response Content: {response.text}")
|
| 144 |
-
|
| 145 |
-
response.raise_for_status()
|
| 146 |
-
data = response.json()
|
| 147 |
-
|
| 148 |
-
logger.info("Response parsed successfully")
|
| 149 |
-
logger.info(f" Parsed data keys: {list(data.keys()) if data else 'None'}")
|
| 150 |
-
|
| 151 |
-
if not data or 'data' not in data or 'colleges' not in data['data']:
|
| 152 |
-
logger.warning("No college predictions found in response")
|
| 153 |
-
return "No college predictions found with the given criteria."
|
| 154 |
-
|
| 155 |
-
colleges = data['data']['colleges']
|
| 156 |
-
logger.info(f"Found {len(colleges)} colleges in response")
|
| 157 |
-
|
| 158 |
-
if not colleges:
|
| 159 |
-
logger.warning("Colleges list is empty")
|
| 160 |
-
return "No college predictions found with the given criteria."
|
| 161 |
-
|
| 162 |
-
results = []
|
| 163 |
-
for i, college in enumerate(colleges[:limit], start=1):
|
| 164 |
-
logger.info(f"Processing college {i}: {college.get('Institute', 'N/A')}")
|
| 165 |
-
parts = [f"{i}. College: {college.get('Institute', 'N/A')}"]
|
| 166 |
-
if college.get('Academic_Program_Name'):
|
| 167 |
-
parts.append(f"Branch: {college['Academic_Program_Name']}")
|
| 168 |
-
if college.get('Seat_Type'):
|
| 169 |
-
parts.append(f"Category: {college['Seat_Type']}")
|
| 170 |
-
if college.get('Max_ClosingRank'):
|
| 171 |
-
parts.append(f"Closing Rank: {college['Max_ClosingRank']}")
|
| 172 |
-
results.append(", ".join(parts))
|
| 173 |
-
|
| 174 |
-
final_result = f"Based on your rank {userCrl}, here are college predictions:\n\n" + "\n".join(results)
|
| 175 |
-
|
| 176 |
-
logger.info("=" * 80)
|
| 177 |
-
logger.info("PREDICTOR API CALL COMPLETED SUCCESSFULLY")
|
| 178 |
-
logger.info("=" * 80)
|
| 179 |
-
|
| 180 |
-
return final_result
|
| 181 |
-
|
| 182 |
-
except ValueError as e:
|
| 183 |
-
error_msg = f"Parameter conversion error: {str(e)}"
|
| 184 |
-
logger.error(f"ValueError: {error_msg}")
|
| 185 |
-
logger.error("=" * 80)
|
| 186 |
-
return error_msg
|
| 187 |
-
|
| 188 |
-
except requests.exceptions.HTTPError as e:
|
| 189 |
-
error_msg = f"HTTP Error: {str(e)}"
|
| 190 |
-
logger.error(f"HTTPError: {error_msg}")
|
| 191 |
-
logger.error(f"Response body: {response.text if 'response' in locals() else 'No response'}")
|
| 192 |
-
logger.error("=" * 80)
|
| 193 |
-
return f"Error fetching college predictions: {error_msg}"
|
| 194 |
-
|
| 195 |
-
except requests.exceptions.RequestException as e:
|
| 196 |
-
error_msg = f"Request Error: {str(e)}"
|
| 197 |
-
logger.error(f"RequestException: {error_msg}")
|
| 198 |
-
logger.error("=" * 80)
|
| 199 |
-
return f"Error fetching college predictions: {error_msg}"
|
| 200 |
-
|
| 201 |
-
except Exception as e:
|
| 202 |
-
error_msg = f"Unexpected error: {str(e)}"
|
| 203 |
-
logger.error(f"Exception: {error_msg}")
|
| 204 |
-
logger.error("=" * 80)
|
| 205 |
-
return f"Error fetching college predictions: {error_msg}"
|
| 206 |
-
|
| 207 |
-
def mentor_search(college_query: str) -> str:
|
| 208 |
-
"""Search mentors by college name and return formatted links."""
|
| 209 |
-
url = f"https://api.precollege.in/api/v1/
|
| 210 |
-
try:
|
| 211 |
-
response = requests.get(url, timeout=10)
|
| 212 |
-
response.raise_for_status()
|
| 213 |
-
data = response.json()
|
| 214 |
-
|
| 215 |
-
if not data or "data" not in data or not data["data"]:
|
| 216 |
-
return f"No mentors found for '{college_query}'."
|
| 217 |
-
|
| 218 |
-
mentors = data["data"]
|
| 219 |
-
lines = []
|
| 220 |
-
for mentor in mentors:
|
| 221 |
-
name = mentor.get("name", "Unknown")
|
| 222 |
-
username = mentor.get("username", "")
|
| 223 |
-
profile_url = f"https://www.precollege.in/mentor/{username}" if username else "No profile link"
|
| 224 |
-
lines.append(f"{name}: {profile_url}")
|
| 225 |
-
|
| 226 |
-
return f"Mentors for '{college_query}':\n\n" + "\n".join(lines)
|
| 227 |
-
except requests.exceptions.RequestException as e:
|
| 228 |
-
return f"Failed to fetch mentors: {str(e)}"
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
# === FUNCTION TOOL WRAPPERS ===
|
| 232 |
-
db_tool = FunctionTool(db_search)
|
| 233 |
-
tavily_tool = FunctionTool(tavily_search)
|
| 234 |
-
predictor_tool = FunctionTool(college_predictor)
|
| 235 |
-
mentor_tool = FunctionTool(mentor_search)
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import datetime
|
| 4 |
+
import requests
|
| 5 |
+
import logging
|
| 6 |
+
# import gspread
|
| 7 |
+
from dotenv import load_dotenv
|
| 8 |
+
|
| 9 |
+
# Configure logging
|
| 10 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
# from huggingface_hub import login as hf_login
|
| 13 |
+
from langchain_community.vectorstores import FAISS
|
| 14 |
+
from langchain.embeddings.base import Embeddings
|
| 15 |
+
from sentence_transformers import SentenceTransformer
|
| 16 |
+
from langchain_tavily import TavilySearch
|
| 17 |
+
from google.adk.tools import FunctionTool
|
| 18 |
+
|
| 19 |
+
# === LOAD ENV ===
|
| 20 |
+
load_dotenv()
|
| 21 |
+
# HF_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
|
| 22 |
+
# GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
|
| 23 |
+
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
|
| 24 |
+
# SERVICE_ACCOUNT_JSON = os.getenv("GOOGLE_SERVICE_ACCOUNT_JSON")
|
| 25 |
+
# SHEET_KEY = os.getenv("SHEET_KEY")
|
| 26 |
+
PREDICTOR_API_URL = os.getenv("PREDICTOR_API_URL")
|
| 27 |
+
PREDICTOR_API_KEY = os.getenv("PREDICTOR_API_KEY")
|
| 28 |
+
|
| 29 |
+
# hf_login(token=HF_TOKEN)
|
| 30 |
+
|
| 31 |
+
# === GOOGLE SHEET LOGGING ===
|
| 32 |
+
# service_account_dict = json.loads(SERVICE_ACCOUNT_JSON) if isinstance(SERVICE_ACCOUNT_JSON, str) else SERVICE_ACCOUNT_JSON
|
| 33 |
+
|
| 34 |
+
# def add_query_to_sheet(user_id: str, query: str, response: str):
|
| 35 |
+
# gc = gspread.service_account_from_dict(service_account_dict)
|
| 36 |
+
# sh = gc.open_by_key(SHEET_KEY)
|
| 37 |
+
# ws = sh.worksheet("Sheet1")
|
| 38 |
+
# timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 39 |
+
# ws.append_row([user_id, timestamp, query, response])
|
| 40 |
+
|
| 41 |
+
# === VECTOR STORE ===
|
| 42 |
+
def load_vector_store(data_dir: str):
    """Build an in-memory FAISS vector store from every .md file in *data_dir*.

    Each markdown file becomes one document. Files are read as UTF-8 with a
    latin-1 fallback for legacy encodings. Embeddings are computed locally
    with the all-MiniLM-L6-v2 sentence-transformer model.

    Args:
        data_dir: Directory scanned (non-recursively) for ``.md`` files.

    Returns:
        A FAISS vector store over the file contents.

    Raises:
        ValueError: if no readable .md files are found — FAISS cannot build
            an index from an empty corpus, and its own error is opaque.
        FileNotFoundError: if *data_dir* does not exist (from os.listdir).
    """
    texts = []
    for fname in os.listdir(data_dir):
        if not fname.lower().endswith(".md"):
            continue
        path = os.path.join(data_dir, fname)
        try:
            with open(path, "r", encoding="utf-8") as f:
                texts.append(f.read())
        except UnicodeDecodeError:
            # Fallback for files saved with a non-UTF-8 legacy encoding.
            with open(path, "r", encoding="latin-1") as f:
                texts.append(f.read())

    if not texts:
        # Fail fast with a clear message instead of an opaque FAISS error.
        raise ValueError(f"No .md files found in '{data_dir}'; cannot build vector store.")

    st_model = SentenceTransformer("all-MiniLM-L6-v2")

    class LocalEmbeddings(Embeddings):
        # Adapter exposing the local sentence-transformer through the
        # LangChain Embeddings interface expected by FAISS.from_texts.
        def embed_documents(self, docs):
            return st_model.encode(docs).tolist()

        def embed_query(self, q):
            return st_model.encode([q])[0].tolist()

    return FAISS.from_texts(texts, LocalEmbeddings())
|
| 60 |
+
|
| 61 |
+
vector_store = load_vector_store("College_markdown")
|
| 62 |
+
|
| 63 |
+
# === TOOL DEFINITIONS ===
|
| 64 |
+
def db_search(query: str) -> dict:
    """Retrieve locally-indexed document chunks relevant to *query*.

    Performs a similarity search (top 6) against the module-level FAISS
    store built from the college markdown corpus.

    Args:
        query: Free-text search query.

    Returns:
        ``{"results": [chunk_text, ...]}`` — the list is empty when the
        search yields no documents.
    """
    matches = vector_store.similarity_search(query, k=6)
    if not matches:
        return {"results": []}
    return {"results": [doc.page_content for doc in matches]}
|
| 68 |
+
|
| 69 |
+
def tavily_search(query: str) -> dict:
    """Run a Tavily web search for *query* and return snippet texts.

    Args:
        query: Free-text web search query.

    Returns:
        ``{"results": [snippet, ...]}``. When the API key is missing or the
        search fails, a single human-readable message is returned in the
        list instead of raising, so the agent tool call never crashes.
    """
    if not TAVILY_API_KEY:
        return {"results": ["Tavily API key not configured"]}

    tool = TavilySearch(api_key=TAVILY_API_KEY, max_results=6, topic="general", include_raw_content=True)
    try:
        result = tool.invoke({"query": query})
    except Exception as e:
        # Network/API failures were previously unhandled and would abort the
        # whole tool call; degrade to an explanatory result instead.
        logger.error("Tavily search failed: %s", e)
        return {"results": [f"Web search failed: {e}"]}

    # Filter out entries with missing/empty content so callers never see None.
    snippets = [item.get('content') for item in result.get('results', []) if item.get('content')]
    return {"results": snippets}
|
| 77 |
+
|
| 78 |
+
def _format_college_line(index: int, college: dict) -> str:
    """Render one prediction record as a single comma-separated line."""
    parts = [f"{index}. College: {college.get('Institute', 'N/A')}"]
    if college.get('Academic_Program_Name'):
        parts.append(f"Branch: {college['Academic_Program_Name']}")
    if college.get('Seat_Type'):
        parts.append(f"Category: {college['Seat_Type']}")
    if college.get('Max_ClosingRank'):
        parts.append(f"Closing Rank: {college['Max_ClosingRank']}")
    return ", ".join(parts)


def college_predictor(
    userCrl: int,
    userCategory: str,
    userGender: str,
    userHomeState: str,
    limit: int = 4,
    counsellingName: str = "csab",
    collegeName: str = "national institute of technology",
    branchName: str = "computer science and engineering"
) -> str:
    """Query the predictor API for colleges matching the candidate's profile.

    Args:
        userCrl: Candidate's Common Rank List rank.
        userCategory: Reservation category string, passed through verbatim.
        userGender: Gender string as expected by the API.
        userHomeState: Home state, used by the API for state-quota matching.
        limit: Maximum number of colleges included in the formatted result.
        counsellingName: Counselling identifier (default "csab").
        collegeName: If truthy, sent as ``collegeQuery`` to filter colleges.
        branchName: If truthy, sent as ``branchQuery`` to filter branches.

    Returns:
        A human-readable multi-line prediction string, or an error /
        "no results" message. This function never raises to the caller.
    """
    logger.info("=" * 80)
    logger.info("PREDICTOR API CALL STARTED")
    logger.info("=" * 80)

    logger.info("Input Parameters:")
    logger.info("  userCrl: %s (type: %s)", userCrl, type(userCrl))
    logger.info("  userCategory: %s (type: %s)", userCategory, type(userCategory))
    logger.info("  userGender: %s (type: %s)", userGender, type(userGender))
    logger.info("  userHomeState: %s (type: %s)", userHomeState, type(userHomeState))
    logger.info("  limit: %s (type: %s)", limit, type(limit))
    logger.info("  counsellingName: %s (type: %s)", counsellingName, type(counsellingName))
    logger.info("  collegeName: %s (type: %s)", collegeName, type(collegeName))
    logger.info("  branchName: %s (type: %s)", branchName, type(branchName))

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {PREDICTOR_API_KEY}"
    }

    response = None  # pre-bound so the HTTPError handler can log the body safely
    try:
        logger.info("Converting parameters...")
        converted_crl = int(userCrl)
        logger.info("  userCrl converted: %s (type: %s)", converted_crl, type(converted_crl))

        params = {
            "userCrl": converted_crl,
            "userCategory": userCategory,
            "userGender": userGender,
            "userHomeState": userHomeState,
            "limit": limit,
            "counsellingName": counsellingName,
        }

        if collegeName:
            params["collegeQuery"] = collegeName
            logger.info("  Added collegeQuery: %s", collegeName)
        if branchName:
            params["branchQuery"] = branchName
            logger.info("  Added branchQuery: %s", branchName)

        # Log the request, but redact the bearer token: the previous code
        # wrote the real API key into the logs via json.dumps(headers).
        safe_headers = {**headers, "Authorization": "Bearer ***REDACTED***"}
        logger.info("Final API Request:")
        logger.info("  URL: %s", PREDICTOR_API_URL)
        logger.info("  Headers: %s", json.dumps(safe_headers, indent=2))
        logger.info("  Payload: %s", json.dumps(params, indent=2))

        logger.info("Making API request...")
        response = requests.post(PREDICTOR_API_URL, json=params, headers=headers, timeout=30)

        logger.info("API Response:")
        logger.info("  Status Code: %s", response.status_code)
        logger.info("  Response Headers: %s", dict(response.headers))
        logger.info("  Response Content: %s", response.text)

        response.raise_for_status()
        data = response.json()

        logger.info("Response parsed successfully")
        logger.info("  Parsed data keys: %s", list(data.keys()) if data else 'None')

        if not data or 'data' not in data or 'colleges' not in data['data']:
            logger.warning("No college predictions found in response")
            return "No college predictions found with the given criteria."

        colleges = data['data']['colleges']
        logger.info("Found %d colleges in response", len(colleges))

        if not colleges:
            logger.warning("Colleges list is empty")
            return "No college predictions found with the given criteria."

        results = []
        for i, college in enumerate(colleges[:limit], start=1):
            logger.info("Processing college %d: %s", i, college.get('Institute', 'N/A'))
            results.append(_format_college_line(i, college))

        final_result = f"Based on your rank {userCrl}, here are college predictions:\n\n" + "\n".join(results)

        logger.info("=" * 80)
        logger.info("PREDICTOR API CALL COMPLETED SUCCESSFULLY")
        logger.info("=" * 80)

        return final_result

    except ValueError as e:
        # int(userCrl) failure — note json.JSONDecodeError also subclasses
        # ValueError, so an unparsable body lands here too.
        error_msg = f"Parameter conversion error: {str(e)}"
        logger.error("ValueError: %s", error_msg)
        logger.error("=" * 80)
        return error_msg

    except requests.exceptions.HTTPError as e:
        error_msg = f"HTTP Error: {str(e)}"
        logger.error("HTTPError: %s", error_msg)
        # `response` is pre-initialized, replacing the fragile `'response' in
        # locals()` check from the original.
        logger.error("Response body: %s", response.text if response is not None else 'No response')
        logger.error("=" * 80)
        return f"Error fetching college predictions: {error_msg}"

    except requests.exceptions.RequestException as e:
        error_msg = f"Request Error: {str(e)}"
        logger.error("RequestException: %s", error_msg)
        logger.error("=" * 80)
        return f"Error fetching college predictions: {error_msg}"

    except Exception as e:
        error_msg = f"Unexpected error: {str(e)}"
        logger.error("Exception: %s", error_msg)
        logger.error("=" * 80)
        return f"Error fetching college predictions: {error_msg}"
|
| 206 |
+
|
| 207 |
+
def mentor_search(college_query: str) -> str:
    """Search mentors by college name and return formatted profile links.

    Args:
        college_query: Free-text college name to filter mentors by.

    Returns:
        A newline-separated list of ``name: profile_url`` lines, or a
        human-readable message when nothing is found or the request fails.
        Never raises to the caller.
    """
    url = "https://api.precollege.in/api/v1/mentor/list"
    try:
        # Pass the query via `params` so college names containing spaces or
        # special characters are URL-encoded correctly (the previous code
        # interpolated the raw string into the URL).
        response = requests.get(url, params={"college": college_query, "limit": 3}, timeout=10)
        response.raise_for_status()
        data = response.json()
    except requests.exceptions.RequestException as e:
        return f"Failed to fetch mentors: {str(e)}"
    except ValueError as e:
        # Response body was not valid JSON.
        return f"Failed to fetch mentors: {str(e)}"

    if not data or "data" not in data or not data["data"]:
        return f"No mentors found for '{college_query}'."

    lines = []
    for mentor in data["data"]:
        name = mentor.get("name", "Unknown")
        username = mentor.get("username", "")
        profile_url = f"https://www.precollege.in/mentor/{username}" if username else "No profile link"
        lines.append(f"{name}: {profile_url}")

    return f"Mentors for '{college_query}':\n\n" + "\n".join(lines)
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
# === FUNCTION TOOL WRAPPERS ===
# Expose each plain function as a Google ADK FunctionTool so the agent
# framework can discover and invoke them as callable tools.
db_tool = FunctionTool(db_search)
tavily_tool = FunctionTool(tavily_search)
predictor_tool = FunctionTool(college_predictor)
mentor_tool = FunctionTool(mentor_search)
|