# kino/app.py — College performance analysis chatbot (Hugging Face Space)
# Uploaded by barathm111, commit 7c8014e (verified).
import os
import random
from huggingface_hub import InferenceClient
import gradio as gr
class CollegePerformanceAnalyzer:
    """Generate randomized college performance metrics, weight them into a
    mock NIRF-style ranking, and optionally request strategic feedback from
    a hosted LLM (Mistral-7B-Instruct via the HF Inference API)."""

    def __init__(self):
        """Initialize the inference client from the HF_TOKEN env var.

        On any failure (missing token, auth/network error) ``self.client``
        is left as ``None``; callers must check it and degrade gracefully.
        """
        try:
            # Strip whitespace/newlines that sometimes sneak into pasted secrets.
            hf_token = os.environ.get('HF_TOKEN', '').strip()
            if not hf_token:
                raise ValueError("No Hugging Face token found. Please set it as a Space secret.")
            self.client = InferenceClient(
                model="mistralai/Mistral-7B-Instruct-v0.1",
                token=hf_token
            )
        except Exception as e:
            print(f"Inference Client Initialization Error: {e}")
            self.client = None
        self.parameters = self._define_performance_parameters()

    def _define_performance_parameters(self):
        """Return the parameter table: short code -> {weight, full_name}."""
        return {
            "SS": {"weight": 0.06, "full_name": "Student Strength"},
            "FSR": {"weight": 0.075, "full_name": "Faculty-Student Ratio"},
            "FQE": {"weight": 0.06, "full_name": "Faculty Qualification Efficiency"},
            "FRU": {"weight": 0.06, "full_name": "Faculty Research Utility"},
            "OE+MIR": {"weight": 0.03, "full_name": "Outreach & Industry Engagement"},
            "GUE": {"weight": 0.12, "full_name": "Graduate Unemployment Excellence"},
            "GPHD": {"weight": 0.08, "full_name": "Graduate PhD Pursuit"},
            "RD": {"weight": 0.03, "full_name": "Research Development"},
            "WD": {"weight": 0.03, "full_name": "Worldwide Diversity"},
            "ESCS": {"weight": 0.02, "full_name": "Economic & Social Campus Sustainability"},
            "PCS": {"weight": 0.02, "full_name": "Peer Campus Satisfaction"},
            "PR": {"weight": 0.10, "full_name": "Perception Rating"},
        }

    def generate_performance_scores(self, seed=None):
        """Return a fresh copy of the parameter table with a random 0-100
        ``"score"`` added to each entry.

        seed: optional int for reproducible scores (passed to random.seed).

        Fix: the previous shallow ``self.parameters.copy()`` shared the
        inner dicts, so scores leaked into ``self.parameters`` itself.
        Each inner dict is now copied, keeping instance state pristine.
        """
        if seed is not None:
            random.seed(seed)
        scored = {code: dict(info) for code, info in self.parameters.items()}
        for info in scored.values():
            info["score"] = random.randint(0, 100)
        return scored

    def calculate_weighted_metrics(self, parameters):
        """Compute weighted scores, a mock NIRF rank, and a 0-5 rating.

        parameters: mapping as produced by generate_performance_scores.
        Each entry gains a ``"weighted_score"`` key (mutated in place).
        Returns a dict with parameters, total_weighted_score, nirf_rank,
        and overall_rating.
        """
        for values in parameters.values():
            values["weighted_score"] = values["score"] * values["weight"]
        total_weighted_score = sum(values["weighted_score"] for values in parameters.values())
        # Toy inverse mapping: a higher weighted score yields a better (lower) rank.
        nirf_rank = int((1000 - total_weighted_score) / 10)
        average_score = sum(values["score"] for values in parameters.values()) / len(parameters)
        overall_rating = round(average_score / 20)  # 0-100 average -> 0-5 scale
        return {
            "parameters": parameters,
            "total_weighted_score": total_weighted_score,
            "nirf_rank": nirf_rank,
            "overall_rating": overall_rating,
        }

    def generate_ai_feedback(self, analysis_results):
        """Ask the hosted LLM for strategic feedback.

        Returns the generated text, or an error string — never raises, so
        the chat flow can always render something.
        """
        if not self.client:
            return "AI feedback could not be generated. Client not initialized."
        feedback_prompt = self._construct_feedback_prompt(analysis_results)
        try:
            completion = self.client.text_generation(
                prompt=feedback_prompt,
                max_new_tokens=500,
                temperature=0.7,
                top_p=0.9,
                repetition_penalty=1.1,
            )
            return completion
        except Exception as e:
            print(f"Detailed AI Feedback Generation Error: {e}")
            return f"AI Feedback Generation Error: {str(e)}"

    def _construct_feedback_prompt(self, analysis_results):
        """Build the LLM prompt listing each metric and the overall rating."""
        parameters = analysis_results["parameters"]
        overall_rating = analysis_results["overall_rating"]
        prompt = "Comprehensive College Performance Strategic Analysis:\n\nPerformance Metrics:\n"
        for details in parameters.values():
            prompt += f"{details['full_name']}: {details['score']}/100\n"
        prompt += f"\nOverall Rating: {overall_rating}/5\n\nProvide a detailed strategic analysis including:\n"
        prompt += "1. Key institutional strengths\n2. Critical improvement areas\n3. Actionable recommendations\n4. Long-term impact on rankings\n"
        return prompt
def chatbot_responses(message, history, seed=None, use_ai_insights=True):
    """Gradio callback: run the performance analysis when commanded.

    message: user input text; the analysis runs only if it contains
        "analyze performance" (case-insensitive).
    history: chat history as a list of [user, bot] pairs (coerced to []).
    seed: optional int for reproducible random scores.
    use_ai_insights: when False, skip the LLM call entirely.

    Returns (updated_history, ""). The second value is wired to the input
    Textbox, so the empty string clears it after each submit — the previous
    version returned the full response text, which re-filled the textbox.
    """
    # Defensive: Gradio may hand us None on first invocation.
    if not isinstance(history, list):
        history = []
    # No recognized command: reply with usage help.
    if not message or "analyze performance" not in message.lower():
        response = "I can assist with college performance analysis. Type 'Analyze performance' to get started."
        history.append([message, response])
        return history, ""
    analyzer = CollegePerformanceAnalyzer()
    # Bail out early if the inference client failed to initialize.
    if analyzer.client is None:
        response = "Error: Unable to initialize AI client. Please check your Hugging Face token."
        history.append([message, response])
        return history, ""
    parameters = analyzer.generate_performance_scores(seed)
    analysis_results = analyzer.calculate_weighted_metrics(parameters)
    if use_ai_insights:
        try:
            feedback = analyzer.generate_ai_feedback(analysis_results)
        except Exception as e:
            feedback = f"AI Insights Error: {str(e)}"
    else:
        feedback = "AI Insights are disabled."
    # Assemble the Markdown-formatted chat reply.
    response = f"""
🎓 **College Performance Analysis**
- **Total Weighted Score**: {analysis_results['total_weighted_score']:.2f}
- **Predicted NIRF Rank**: {analysis_results['nirf_rank']}
- **Overall Rating**: {analysis_results['overall_rating']}/5
**Performance Breakdown:**
"""
    for details in analysis_results['parameters'].values():
        response += f"- {details['full_name']}: {details['score']}/100\n"
    response += f"\n**AI Feedback:** {feedback}"
    history.append([message, response])
    return history, ""
def create_chatbot_interface():
    """Build the Gradio Blocks UI and wire it to chatbot_responses.

    Returns the (unlaunched) gr.Blocks demo.
    """
    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        msg = gr.Textbox(label="Enter your message")
        seed_input = gr.Number(label="Random Seed (Optional)", precision=0)
        use_ai_insights_checkbox = gr.Checkbox(label="Enable AI Insights", value=True)
        submit = gr.Button("Submit")
        clear = gr.Button("Clear")
        # Shared input list for both the button click and the Enter key.
        submit_params = [
            msg,                       # message input
            chatbot,                   # chat history
            seed_input,                # seed for random generation
            use_ai_insights_checkbox,  # AI insights toggle
        ]
        submit.click(
            chatbot_responses,
            inputs=submit_params,
            outputs=[chatbot, msg]
        )
        msg.submit(
            chatbot_responses,
            inputs=submit_params,
            outputs=[chatbot, msg]
        )
        # Clear button: Gradio requires one return value per output, so the
        # handler returns a 2-tuple (the original `lambda: None` supplied a
        # single value for two outputs, which errors at runtime). None resets
        # the Chatbot; "" empties the Textbox.
        clear.click(
            lambda: (None, ""),
            None,
            [chatbot, msg],
            queue=False
        )
    return demo
def main():
    """Entry point: build the Gradio interface and launch the app."""
    create_chatbot_interface().launch()


if __name__ == "__main__":
    main()