"""Lecturer Performance Analyzer: a Gradio app that rates lecturer performance
from six metrics with a fine-tuned RoBERTa classifier and returns charts and
recommendations."""
import gradio as gr
import torch
import plotly.graph_objects as go
from transformers import RobertaForSequenceClassification, RobertaTokenizer
# Config
MODEL_PATH = "ManifestedSon/Lecturers_Performance"
THEME = gr.themes.Soft()
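
# Note: constructing PerformanceAnalyzer downloads the model weights from the
# Hugging Face Hub on first run, which can take a while on a cold start.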
class PerformanceAnalyzer:
    """Runs the classifier over the six ratings and builds charts plus recommendations."""

    def __init__(self, model_path):
        self.model = RobertaForSequenceClassification.from_pretrained(model_path)
        self.model.eval()  # inference only; disables dropout
        self.tokenizer = RobertaTokenizer.from_pretrained(model_path)
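        # Assumption: these five labels match the model's output classes in logit order.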
self.performance_labels = ["Needs Improvement", "Good", "Very Good", "Excellent", "Outstanding"]
def analyze(self, student_feedback, learning_outcome, self_reflection,
goal_alignment, peer_feedback, timeliness):
# Model prediction
input_text = self._format_input(student_feedback, learning_outcome,
self_reflection, goal_alignment,
peer_feedback, timeliness)
inputs = self.tokenizer(input_text, return_tensors="pt",
truncation=True, padding=True, max_length=512)
with torch.no_grad():
outputs = self.model(**inputs)
predictions = torch.softmax(outputs.logits, dim=1)
predicted_class = torch.argmax(predictions, dim=1).item()
# Generate visualizations
radar_chart = self._create_radar_chart(student_feedback, learning_outcome,
self_reflection, goal_alignment,
peer_feedback, timeliness)
performance_chart = self._create_performance_chart(predictions[0].tolist())
# Generate recommendations
recommendations = self._generate_recommendations(predicted_class,
predictions[0].tolist())
return radar_chart, performance_chart, recommendations
    def _format_input(self, *args):
        """Serialize the six ratings into a single string for the classifier,
        e.g. "Student Feedback: 4.5 Learning Outcome: 4.0 ..." (the format the
        model is assumed to have been fine-tuned on)."""
        labels = ["Student Feedback", "Learning Outcome", "Self Reflection",
                  "Goal Alignment", "Peer Feedback", "Timeliness"]
        return " ".join(f"{label}: {value}" for label, value in zip(labels, args))
def _create_radar_chart(self, *metrics):
categories = ['Student Feedback', 'Learning Outcome', 'Self Reflection',
'Goal Alignment', 'Peer Feedback', 'Timeliness']
fig = go.Figure()
fig.add_trace(go.Scatterpolar(
r=list(map(float, metrics)),
theta=categories,
fill='toself',
name='Performance Metrics'
))
fig.update_layout(
polar=dict(radialaxis=dict(visible=True, range=[0, 5])),
showlegend=False,
title="Performance Metrics Radar"
)
return fig
def _create_performance_chart(self, probabilities):
fig = go.Figure(data=[
go.Bar(x=self.performance_labels, y=probabilities)
])
fig.update_layout(
title="Performance Level Probability Distribution",
xaxis_title="Performance Levels",
yaxis_title="Probability"
)
return fig
def _generate_recommendations(self, predicted_class, probabilities):
base_recommendations = {
0: "Areas requiring immediate attention. Consider professional development programs.",
1: "Shows promise but has room for improvement. Focus on specific areas.",
2: "Solid performance. Continue current practices and seek growth opportunities.",
3: "Excellent work. Consider mentoring others and sharing best practices.",
4: "Outstanding achievement. Lead professional development initiatives."
}
        # Build the text explicitly so the Textbox output carries no source indentation.
        recommendation = (
            f"Performance Level: {self.performance_labels[predicted_class]}\n\n"
            "Key Recommendations:\n"
            f"{base_recommendations[predicted_class]}\n\n"
            "Specific Actions:\n"
            f"1. {self._get_specific_actions(predicted_class)}\n"
            f"2. {self._get_improvement_areas(probabilities)}"
        )
        return recommendation
def _get_specific_actions(self, performance_level):
actions = {
0: "Enroll in teaching methodology workshops and seek mentoring.",
1: "Implement structured feedback collection and analysis.",
2: "Develop innovative teaching methods and assessment techniques.",
3: "Share successful teaching practices through faculty workshops.",
4: "Create teaching excellence programs for peer development."
}
return actions[performance_level]
    def _get_improvement_areas(self, probabilities):
        # A diffuse distribution (no level above 0.8 probability) suggests the
        # model sees mixed signals rather than one clear performance level.
        if max(probabilities) < 0.8:
            return "Focus on consistency across all performance metrics."
        return "Maintain current excellence while innovating teaching methods."
def build_interface():
analyzer = PerformanceAnalyzer(MODEL_PATH)
with gr.Blocks(theme=THEME) as demo:
gr.Markdown("# Lecturer Performance Management System")
with gr.Row():
# Input Column
with gr.Column(scale=1):
inputs = [
gr.Slider(0, 5, label="Student Feedback", info="Rate from 0-5"),
gr.Slider(0, 5, label="Learning Outcome", info="Rate from 0-5"),
gr.Slider(0, 5, label="Self Reflection", info="Rate from 0-5"),
gr.Slider(0, 5, label="Goal Alignment", info="Rate from 0-5"),
gr.Slider(0, 5, label="Peer Feedback", info="Rate from 0-5"),
gr.Slider(0, 5, label="Timeliness", info="Rate from 0-5")
]
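                # Slider values are passed positionally to analyze() in this order.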
analyze_btn = gr.Button("Analyze Performance", variant="primary")
# Output Column
with gr.Column(scale=2):
with gr.Row():
radar_plot = gr.Plot(label="Performance Metrics")
perf_plot = gr.Plot(label="Performance Distribution")
recommendations = gr.Textbox(label="AI Recommendations",
lines=10, interactive=False)
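        # analyze() returns (radar figure, distribution figure, text), in the
        # same order as the outputs list below.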
analyze_btn.click(
fn=analyzer.analyze,
inputs=inputs,
outputs=[radar_plot, perf_plot, recommendations]
)
return demo
if __name__ == "__main__":
    demo = build_interface()
    # share=True creates a temporary public link when run locally; it is
    # ignored (with a warning) on Hugging Face Spaces.
    demo.launch(share=True)