File size: 7,704 Bytes
4e1a1b2
 
 
eee6a7e
4e1a1b2
 
 
 
7c8014e
 
 
4e1a1b2
 
 
 
 
 
 
 
7c8014e
4e1a1b2
 
 
 
bc6ff50
4e1a1b2
26b9e97
 
 
 
 
 
 
 
 
 
 
 
4e1a1b2
 
bc6ff50
4e1a1b2
 
 
 
 
 
 
 
bc6ff50
4e1a1b2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bc6ff50
4e1a1b2
7c8014e
4e1a1b2
 
 
 
7c8014e
 
 
4e1a1b2
 
 
 
 
 
 
 
 
7c8014e
 
4e1a1b2
bc6ff50
4e1a1b2
 
 
 
 
 
 
 
 
 
 
cbe0cb6
4848b58
 
 
 
9d4091d
 
4848b58
 
 
9d4091d
 
eee6a7e
7c8014e
 
 
 
 
 
 
9d4091d
 
 
 
 
 
26b9e97
9d4091d
 
 
 
 
 
 
 
eee6a7e
9d4091d
 
 
eee6a7e
9d4091d
 
 
 
 
 
 
 
 
4848b58
 
 
eee6a7e
26b9e97
f84cc58
cbe0cb6
9d4091d
f84cc58
 
9d4091d
cbe0cb6
26b9e97
9d4091d
 
 
 
 
 
 
 
 
 
 
4848b58
9d4091d
 
 
cbe0cb6
 
9d4091d
4848b58
9d4091d
 
 
 
 
 
 
 
f84cc58
4e1a1b2
f84cc58
 
 
26b9e97
4e1a1b2
f84cc58
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
import os
import random
from huggingface_hub import InferenceClient
import gradio as gr

class CollegePerformanceAnalyzer:
    """Simulate college performance metrics and generate AI-backed feedback.

    Random scores are drawn for a set of NIRF-style weighted parameters;
    an optional Hugging Face Inference client (Mistral-7B-Instruct) turns
    the computed metrics into a narrative strategic analysis.
    """

    def __init__(self):
        """Initialize the inference client from the HF_TOKEN env var.

        On any failure (missing token, client construction error) the
        client is left as None and AI feedback is simply unavailable;
        the numeric analysis still works.
        """
        try:
            # Strip whitespace/newlines that often sneak into copied secrets.
            hf_token = os.environ.get('HF_TOKEN', '').strip()

            if not hf_token:
                raise ValueError("No Hugging Face token found. Please set it as a Space secret.")

            self.client = InferenceClient(
                model="mistralai/Mistral-7B-Instruct-v0.1",
                token=hf_token
            )
        except Exception as e:
            # Degrade gracefully instead of crashing the whole app.
            print(f"Inference Client Initialization Error: {e}")
            self.client = None

        # Shared, score-free template; generate_performance_scores copies it.
        self.parameters = self._define_performance_parameters()

    def _define_performance_parameters(self):
        """Return the parameter catalogue: code -> {weight, full_name}.

        Weights appear to follow an NIRF-style weighting scheme
        (they sum to 0.685, not 1.0 — presumably intentional; verify).
        """
        return {
            "SS": {"weight": 0.06, "full_name": "Student Strength"},
            "FSR": {"weight": 0.075, "full_name": "Faculty-Student Ratio"},
            "FQE": {"weight": 0.06, "full_name": "Faculty Qualification Efficiency"},
            "FRU": {"weight": 0.06, "full_name": "Faculty Research Utility"},
            "OE+MIR": {"weight": 0.03, "full_name": "Outreach & Industry Engagement"},
            "GUE": {"weight": 0.12, "full_name": "Graduate Unemployment Excellence"},
            "GPHD": {"weight": 0.08, "full_name": "Graduate PhD Pursuit"},
            "RD": {"weight": 0.03, "full_name": "Research Development"},
            "WD": {"weight": 0.03, "full_name": "Worldwide Diversity"},
            "ESCS": {"weight": 0.02, "full_name": "Economic & Social Campus Sustainability"},
            "PCS": {"weight": 0.02, "full_name": "Peer Campus Satisfaction"},
            "PR": {"weight": 0.10, "full_name": "Perception Rating"},
        }

    def generate_performance_scores(self, seed=None):
        """Return a fresh parameter dict with random "score" values (0-100).

        Args:
            seed: Optional seed for reproducible score generation.

        Uses a local random.Random so the module-global RNG state is not
        disturbed, and builds new inner dicts so self.parameters is never
        mutated (the previous shallow ``.copy()`` leaked scores back into
        the shared template, making results sticky across calls).
        """
        rng = random.Random(seed)  # same seeded sequence as random.seed(seed)
        return {
            param: {**values, "score": rng.randint(0, 100)}
            for param, values in self.parameters.items()
        }

    def calculate_weighted_metrics(self, parameters):
        """Compute weighted totals, a predicted rank, and a 0-5 rating.

        Args:
            parameters: dict as produced by generate_performance_scores;
                each entry gains a "weighted_score" key (mutated in place).

        Returns:
            dict with "parameters", "total_weighted_score", "nirf_rank",
            and "overall_rating" (average score mapped onto a 0-5 scale).
        """
        for param, values in parameters.items():
            values["weighted_score"] = values["score"] * values["weight"]

        total_weighted_score = sum(values["weighted_score"] for values in parameters.values())
        # Higher weighted score -> numerically better (lower) predicted rank.
        nirf_rank = int((1000 - total_weighted_score) / 10)
        average_score = sum(values["score"] for values in parameters.values()) / len(parameters)
        overall_rating = round(average_score / 20)

        return {
            "parameters": parameters,
            "total_weighted_score": total_weighted_score,
            "nirf_rank": nirf_rank,
            "overall_rating": overall_rating,
        }

    def generate_ai_feedback(self, analysis_results):
        """Ask the hosted LLM for a strategic analysis of the results.

        Returns the generated text, or a human-readable error string if
        the client is unavailable or the request fails.
        """
        if not self.client:
            return "AI feedback could not be generated. Client not initialized."

        feedback_prompt = self._construct_feedback_prompt(analysis_results)

        try:
            completion = self.client.text_generation(
                prompt=feedback_prompt,
                max_new_tokens=500,
                temperature=0.7,
                top_p=0.9,
                repetition_penalty=1.1,
            )
            return completion
        except Exception as e:
            print(f"Detailed AI Feedback Generation Error: {e}")
            return f"AI Feedback Generation Error: {str(e)}"

    def _construct_feedback_prompt(self, analysis_results):
        """Build the text prompt summarizing scores for the LLM."""
        parameters = analysis_results["parameters"]
        overall_rating = analysis_results["overall_rating"]

        prompt = "Comprehensive College Performance Strategic Analysis:\n\nPerformance Metrics:\n"
        for param, details in parameters.items():
            prompt += f"{details['full_name']}: {details['score']}/100\n"

        prompt += f"\nOverall Rating: {overall_rating}/5\n\nProvide a detailed strategic analysis including:\n"
        prompt += "1. Key institutional strengths\n2. Critical improvement areas\n3. Actionable recommendations\n4. Long-term impact on rankings\n"
        return prompt

def chatbot_responses(message, history, seed=None, use_ai_insights=True):
    """Chat handler for the Gradio UI.

    Runs a full performance analysis when the user asks to "analyze
    performance"; otherwise replies with a usage hint.

    Returns:
        (updated_history, response_text) — history as list of
        [user_message, bot_response] pairs.
    """
    # Normalise history into the list-of-pairs shape Gradio expects.
    if not isinstance(history, list):
        history = []

    wants_analysis = bool(message) and "analyze performance" in message.lower()

    # Guard: anything that isn't an analysis request gets the usage hint.
    if not wants_analysis:
        reply = ("I can assist with college performance analysis. "
                 "Type 'Analyze performance' to get started.")
        history.append([message, reply])
        return history, reply

    analyzer = CollegePerformanceAnalyzer()

    # Guard: bail out early if the inference client never came up.
    if analyzer.client is None:
        reply = "Error: Unable to initialize AI client. Please check your Hugging Face token."
        history.append([message, reply])
        return history, reply

    scored = analyzer.generate_performance_scores(seed)
    results = analyzer.calculate_weighted_metrics(scored)

    if not use_ai_insights:
        feedback = "AI Insights are disabled."
    else:
        try:
            feedback = analyzer.generate_ai_feedback(results)
        except Exception as e:
            feedback = f"AI Insights Error: {str(e)}"

    # Headline metrics (template kept verbatim for identical rendering).
    reply = f"""

🎓 **College Performance Analysis**



- **Total Weighted Score**: {results['total_weighted_score']:.2f}

- **Predicted NIRF Rank**: {results['nirf_rank']}

- **Overall Rating**: {results['overall_rating']}/5



**Performance Breakdown:**

"""

    # One line per parameter, then the AI feedback section.
    reply += "".join(
        f"- {details['full_name']}: {details['score']}/100\n"
        for details in results['parameters'].values()
    )
    reply += f"\n**AI Feedback:** {feedback}"

    history.append([message, reply])
    return history, reply

def create_chatbot_interface():
    """Build the Gradio Blocks UI wired to chatbot_responses.

    Returns:
        The assembled gr.Blocks demo (not yet launched).
    """
    with gr.Blocks() as demo:
        # Components are created in display order.
        chat_window = gr.Chatbot()
        message_box = gr.Textbox(label="Enter your message")
        seed_box = gr.Number(label="Random Seed (Optional)", precision=0)
        ai_toggle = gr.Checkbox(label="Enable AI Insights", value=True)
        send_btn = gr.Button("Submit")
        clear_btn = gr.Button("Clear")

        handler_inputs = [message_box, chat_window, seed_box, ai_toggle]
        handler_outputs = [chat_window, message_box]

        # Button click and Enter key both route through the same handler.
        for trigger in (send_btn.click, message_box.submit):
            trigger(
                chatbot_responses,
                inputs=handler_inputs,
                outputs=handler_outputs,
            )

        # Clear resets both the chat window and the message box.
        clear_btn.click(
            lambda: None,
            None,
            handler_outputs,
            queue=False
        )

    return demo

def main():
    """Entry point: build the chatbot UI and serve it."""
    create_chatbot_interface().launch()

if __name__ == "__main__":
    # Launch the Gradio app only when run as a script, not on import.
    main()