Spaces:
Paused
Paused
Upload 68 files
Browse files — This view is limited to 50 files because it contains too many changes.
See raw diff
- HF_inference.cpython-310.pyc +0 -0
- HF_inference.py +92 -0
- __pycache__/HF_inference.cpython-310.pyc +0 -0
- __pycache__/app5.cpython-310.pyc +0 -0
- app5.cpython-310.pyc +0 -0
- app5.py +204 -0
- app5_selectbox/QuartzoBold-W9lv.ttf +0 -0
- app5_selectbox/__pycache__/academic_list.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/academic_list.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/app5_selectbox_func.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/app5_selectbox_func.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/class_tbl.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/class_tbl.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/database.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/database_con.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/database_con.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/db_con.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/df4_sentiment_analysis.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/evaluation.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/evaluation.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/evaluation_analysis.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/evaluation_analysis.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/evaluation_analysis_g4f.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/evaluation_fac.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/g4f_prompt.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/g4f_prompt.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/instructor.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/instructor.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/langchain_llama_gpu.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/llama2_prompt.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/load_llama2.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/naive_bayes_cl.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/program.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/program.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/student.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/student.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/subj_inst.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/subj_inst.cpython-39.pyc +0 -0
- app5_selectbox/__pycache__/subject.cpython-310.pyc +0 -0
- app5_selectbox/__pycache__/subject.cpython-39.pyc +0 -0
- app5_selectbox/academic_list.py +32 -0
- app5_selectbox/app5_selectbox_func.py +31 -0
- app5_selectbox/class_tbl.py +47 -0
- app5_selectbox/database_con.py +18 -0
- app5_selectbox/df4_sentiment_analysis.py +60 -0
- app5_selectbox/evaluation copy 2.py +281 -0
- app5_selectbox/evaluation copy.py +250 -0
- app5_selectbox/evaluation.py +412 -0
- app5_selectbox/evaluation_analysis copy 2.py +378 -0
- app5_selectbox/evaluation_analysis copy.py +330 -0
HF_inference.cpython-310.pyc
ADDED
|
Binary file (1.36 kB). View file
|
|
|
HF_inference.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# import requests
|
| 2 |
+
# import time
|
| 3 |
+
# import streamlit as st
|
| 4 |
+
# import os
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# # SECRET_TOKEN
|
| 8 |
+
# SECRET_TOKEN = os.getenv("HF_IBOA")
|
| 9 |
+
|
| 10 |
+
# DISTILIBERT = "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-distilbert-base-uncased"
|
| 11 |
+
# BERTLARGE = "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-large-uncased"
|
| 12 |
+
# BERTBASE = "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-base-uncased"
|
| 13 |
+
|
| 14 |
+
# headers = {"Authorization": SECRET_TOKEN}
|
| 15 |
+
|
| 16 |
+
# # @st.cache_resource
|
| 17 |
+
# @st.cache_resource(experimental_allow_widgets=True, show_spinner=False)
|
| 18 |
+
# def query(payload, selected_model):
|
| 19 |
+
# if selected_model == "DISTILIBERT MODEL":
|
| 20 |
+
# API_URL = DISTILIBERT
|
| 21 |
+
# elif selected_model == "BERT-LARGE MODEL":
|
| 22 |
+
# API_URL = BERTLARGE
|
| 23 |
+
# elif selected_model == "BERT-BASE MODEL":
|
| 24 |
+
# API_URL = BERTBASE
|
| 25 |
+
# else:
|
| 26 |
+
# API_URL = DISTILIBERT
|
| 27 |
+
|
| 28 |
+
# start_time = time.time()
|
| 29 |
+
# counter = 0
|
| 30 |
+
# with st.spinner("Processing..."):
|
| 31 |
+
# while True:
|
| 32 |
+
# response = requests.post(API_URL, headers=headers, json=payload)
|
| 33 |
+
# # st.write(response)
|
| 34 |
+
# if response.status_code == 200:
|
| 35 |
+
|
| 36 |
+
# return response.json()
|
| 37 |
+
# else:
|
| 38 |
+
# time.sleep(1) # Wait for 1 second before retrying
|
| 39 |
+
|
| 40 |
+
# def analyze_sintement(text, selected_model):
|
| 41 |
+
# output = query({"inputs": text}, selected_model)
|
| 42 |
+
# if output:
|
| 43 |
+
# # st.success(f"Translation complete!")
|
| 44 |
+
# return output[0][0]['label'], output[0][0]['score']
|
| 45 |
+
# else:
|
| 46 |
+
# st.warning("Error! Please try again.")
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
import requests
|
| 51 |
+
import time
|
| 52 |
+
import streamlit as st
|
| 53 |
+
import os
|
| 54 |
+
|
| 55 |
+
# Map each UI model label (as shown in the Streamlit selectbox) to its
# Hugging Face Inference API endpoint.
MODEL_URLS = {
    "DISTILIBERT MODEL": "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-distilbert-base-uncased",
    "BERT-LARGE MODEL": "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-large-uncased",
    "BERT-BASE MODEL": "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-base-uncased"
}

# API credential read from the environment.  NOTE(review): it is sent as-is
# in the Authorization header below, so the env var is presumably expected to
# already contain the "Bearer <token>" prefix — confirm against the Space
# secret configuration.
SECRET_TOKEN = os.getenv("HF_IBOA")

# Request headers shared by every inference call.
headers = {"Authorization": SECRET_TOKEN}

# Retry policy for the inference calls (models may need time to cold-start).
MAX_RETRIES = 3
RETRY_INTERVAL = 1  # in seconds
|
| 71 |
+
|
| 72 |
+
@st.cache_resource(experimental_allow_widgets=True, show_spinner=False)
def query(payload, selected_model):
    """POST *payload* to the Inference API endpoint for *selected_model*.

    Retries up to ``MAX_RETRIES`` times, sleeping ``RETRY_INTERVAL`` seconds
    between attempts (the hosted model may still be loading).

    Returns the parsed JSON response on HTTP 200, or ``None`` when every
    attempt fails.
    """
    # Fall back to the DistilBERT endpoint for unrecognized labels.
    # Bug fix: the previous default expression, MODEL_URLS[selected_model],
    # was evaluated eagerly and raised KeyError for unknown labels instead
    # of providing a fallback (the older commented-out version of this file
    # used DISTILIBERT as the default).
    API_URL = MODEL_URLS.get(selected_model, MODEL_URLS["DISTILIBERT MODEL"])

    for _attempt in range(MAX_RETRIES):
        response = requests.post(API_URL, headers=headers, json=payload)
        if response.status_code == 200:
            return response.json()
        time.sleep(RETRY_INTERVAL)  # transient failure: wait, then retry

    return None
|
| 85 |
+
|
| 86 |
+
def analyze_sintement(text, selected_model):
    """Classify *text* with the selected model via the Inference API.

    Returns a ``(label, score)`` tuple on success, or ``None`` after
    displaying a Streamlit warning when the API gave no usable response.

    NOTE(review): the public name keeps the original (misspelled)
    identifier so existing callers continue to work.
    """
    output = query({"inputs": text}, selected_model)
    if output:
        # The code assumes the usual text-classification response shape
        # [[{"label": ..., "score": ...}, ...]] and takes the top result.
        return output[0][0]['label'], output[0][0]['score']
    st.warning("Error! Please try again.")
    return None  # explicit: previously fell off the end via a dead `pass`
|
__pycache__/HF_inference.cpython-310.pyc
ADDED
|
Binary file (1.36 kB). View file
|
|
|
__pycache__/app5.cpython-310.pyc
ADDED
|
Binary file (6.76 kB). View file
|
|
|
app5.cpython-310.pyc
ADDED
|
Binary file (6.76 kB). View file
|
|
|
app5.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import time
|
| 3 |
+
from app5_selectbox import academic_list, class_tbl, instructor, program, student, subject, subj_inst, evaluation, evaluation_fac
|
| 4 |
+
from app5_selectbox.database_con import cursor, db_connection
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def student_login(username, password):
    """Look up a student account by credentials.

    Returns the ``(stud_id, stud_name, class_id, user_type)`` row, or
    ``None`` when no student matches.
    """
    # Security fix: the original interpolated raw form input into the SQL
    # string with an f-string, allowing SQL injection from the login form.
    # sqlite3 (see database_con.py) uses '?' placeholders.
    cursor.execute(
        "SELECT s.stud_id, s.stud_name, s.class_id, s.user_type "
        "FROM student s WHERE s.stud_username=? AND s.stud_password=?",
        (username, password),
    )
    return cursor.fetchone()
|
| 10 |
+
|
| 11 |
+
def instructor_login(username, password):
    """Look up an instructor account by credentials.

    Returns the ``(inst_id, inst_name, prog_id)`` row, or ``None`` when no
    instructor matches.
    """
    # Security fix: parameterized query instead of f-string interpolation of
    # raw form input (SQL injection).  sqlite3 uses '?' placeholders.
    cursor.execute(
        "SELECT i.inst_id, i.inst_name, i.prog_id "
        "FROM instructor i WHERE i.inst_username=? AND i.inst_password=?",
        (username, password),
    )
    return cursor.fetchone()
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def app5():
    """Main Streamlit page: authenticate, then route by user role.

    Unauthenticated visitors see the login form.  After login,
    ``st.session_state.user_type`` selects the view: ``'student'`` gets the
    evaluation form, ``'faculty'`` the instructor results screen, and
    ``'admin'`` the table-maintenance screens.

    NOTE(review): the queries below build SQL with f-strings.  The values
    interpolated here come from session state / prior query results rather
    than the raw login form, but parameterized queries would still be safer.
    """
    st.title("Student-Faculty Evaluation")

    if not hasattr(st.session_state, "logged_in") or not st.session_state.logged_in:
        # ---------- Login form ----------
        st.subheader("User Login")
        username = st.text_input("Username")
        password = st.text_input("Password", type="password")

        if st.button("Login", type="primary"):
            # Try a student account first, then fall back to instructor.
            student_info = student_login(username, password)

            if student_info:
                st.success(f"Hello, {student_info[1]}! Login Successful")
                st.session_state.logged_in = True
                st.session_state.student_id = student_info[0]
                st.session_state.class_id = student_info[2]
                st.session_state.user_type = student_info[3]
                time.sleep(1)  # brief pause so the success toast is visible
                st.experimental_rerun()

            elif not student_info:
                instructor_info = instructor_login(username, password)
                if instructor_info:
                    st.success(f"Hello, {instructor_info[1]}! Login Successful")
                    st.session_state.logged_in = True
                    st.session_state.inst_id = instructor_info[0]
                    st.session_state.inst_name = instructor_info[1]
                    st.session_state.prog_id = instructor_info[2]
                    # Instructor rows carry no user_type column; tag manually.
                    st.session_state.user_type = 'faculty'
                    time.sleep(1)
                    st.experimental_rerun()
                else:
                    st.error("Invalid Credentials")

            else:
                # NOTE(review): unreachable — the if/elif above already covers
                # both truthy and falsy student_info.
                st.error("Invalid Credentials")
    else:
        if st.session_state.user_type == 'student':
            # ---------- Student: evaluate remaining subjects ----------
            cursor.execute(f"SELECT s.stud_name, c.class_year, c.class_section FROM student s JOIN class c ON s.class_id = c.class_id WHERE s.stud_id='{st.session_state.student_id}'")
            student_info = cursor.fetchone()
            student_name, class_year, class_section = student_info

            st.subheader(f"Hello, {student_name} (Class Year: {class_year}, Section: {class_section}) - Student Evaluation")

            # Subjects of this student's class that they have NOT yet
            # evaluated (anti-join: LEFT JOIN ... WHERE e.stud_id IS NULL).
            cursor.execute(f"""
            SELECT si.subj_inst_id, si.sub_id_code, sub.sub_name, i.inst_name
            FROM subj_inst si
            LEFT JOIN evaluation e ON e.subj_inst_id = si.subj_inst_id AND e.stud_id = {st.session_state.student_id}
            INNER JOIN subject sub ON sub.sub_id_code = si.sub_id_code
            INNER JOIN instructor i ON i.inst_id = si.inst_id
            WHERE e.stud_id IS NULL AND si.class_id = '{st.session_state.class_id}'
            """)

            subjects = cursor.fetchall()
            subject_names = [f"{subject[2]} with Instructor: {subject[3]}" for subject in subjects]
            if not subjects:
                # Nothing left to evaluate: celebrate, mark done, log out.
                st.warning("You have evaluated all available subjects. Thank you!")
                st.balloons()

                progress_text = "logging-out . ..."
                my_bar = st.progress(0, text=progress_text)
                for percent_complete in range(100):
                    time.sleep(0.01)
                    my_bar.progress(percent_complete + 1, text=progress_text)

                cursor.execute(f"UPDATE student SET is_eval='TRUE' WHERE stud_id = '{st.session_state.student_id}'")
                db_connection.commit()
                st.session_state.pop("logged_in", None)
                st.session_state.pop("student_id", None)
                st.session_state.pop("class_id", None)
                st.experimental_rerun()

            else:
                selected_subject = st.selectbox("Select a Subject to Evaluate", subject_names)
                selected_subject_id = None

                # Map the selected display label back to its subj_inst_id.
                for sel_subject in subjects:
                    if f"{sel_subject[2]} with Instructor: {sel_subject[3]}" == selected_subject:
                        selected_subject_id = sel_subject[0]

                keys = {}
                if selected_subject_id:
                    st.write(f"You are evaluating the {selected_subject}.")
                    criteria_list = [
                        "Teaching Effectiveness",
                        "Course Organization",
                        "Accessibility and Communication",
                        "Assessment and Grading",
                        "Respect and Inclusivity",
                        "Engagement and Interactivity",
                        "Feedback and Improvement",
                        "Accessibility of Learning Resources",
                        "Passion and Enthusiasm",
                        "Professionalism and Ethical Conduct",
                    ]
                    criteria = {}

                    # One slider per criterion, keyed per subject so the
                    # widgets reset when a different subject is selected.
                    for i in range(10):
                        criteria_key = f"criteria_{i}_{selected_subject_id}"
                        criteria_text = f"{criteria_list[i]} (1-5)"
                        criteria[i] = st.slider(criteria_text, 1.00, 5.00, 1.00, step=0.05, key=criteria_key)
                        keys[f"criteria_{i}"] = criteria_key

                    feedback_comment_key = f"feedback_comment_{selected_subject_id}"
                    feedback_comment = st.text_area("Feedback/Comments", key=feedback_comment_key)

                    if st.button("Submit Evaluation"):
                        if not feedback_comment:
                            st.warning("Please provide feedback comments.")
                        else:
                            # Resolve the instructor who teaches this offering.
                            cursor.execute(f"SELECT si.inst_id FROM subj_inst si WHERE si.subj_inst_id = '{selected_subject_id}'")
                            instructor_id = cursor.fetchone()

                            if instructor_id:
                                instructor_id = instructor_id[0]

                                cursor.execute(f"""INSERT INTO evaluation (
                                    stud_id,
                                    subj_inst_id,
                                    inst_id,
                                    Teaching_Effectiveness,
                                    Course_Organization,
                                    Accessibility_and_Communication,
                                    Assessment_and_Grading,
                                    Respect_and_Inclusivity,
                                    Engagement_and_Interactivity,
                                    Feedback_and_Improvement,
                                    Accessibility_of_Learning_Resources,
                                    Passion_and_Enthusiasm,
                                    Professionalism_and_Ethical_Conduct,
                                    comments,
                                    eval_timestamp)
                                    VALUES ('{st.session_state.student_id}', '{selected_subject_id}', '{instructor_id}', '{criteria[0]}', '{criteria[1]}', '{criteria[2]}', '{criteria[3]}', '{criteria[4]}', '{criteria[5]}', '{criteria[6]}', '{criteria[7]}', '{criteria[8]}', '{criteria[9]}','{feedback_comment}', strftime('%Y-%m-%d %H:%M:%S','now'))""")
                                db_connection.commit()

                                with st.empty():
                                    st.write("Submitting evaluation...")
                                    time.sleep(0.3)
                                    st.success("Evaluation submitted successfully")
                                    time.sleep(0.4)

                                feedback_comment = ""

                                st.experimental_rerun()
                else:
                    # No subject resolved: blank out the stale widget keys.
                    for i in keys.keys():
                        keys[i] = None
                    feedback_comment = None

            if st.button("Log Out"):
                st.session_state.pop("logged_in", None)
                st.session_state.pop("student_id", None)
                st.session_state.pop("class_id", None)
                st.experimental_rerun()
        elif st.session_state.user_type == 'faculty':
            # ---------- Faculty: per-instructor evaluation results ----------
            evaluation_fac.evaluation()

        elif st.session_state.user_type == 'admin':
            # ---------- Admin: maintenance screen per table ----------
            table_name = st.sidebar.selectbox("Select Table", ("academic_list", "class", "instructor", "program", "student", "subject", "subj_inst", "evaluation"))

            if table_name == "academic_list":
                academic_list.academic_list(table_name)
            elif table_name == "class":
                class_tbl.class_tbl(table_name)
            elif table_name == "instructor":
                instructor.instructor(table_name)
            elif table_name == "program":
                program.program(table_name)
            elif table_name == "student":
                student.student(table_name)
            elif table_name == "subject":
                subject.subject(table_name)
            elif table_name == "subj_inst":
                subj_inst.subj_inst(table_name)
            elif table_name == "evaluation":
                evaluation.evaluation()
            else:
                st.error("Select a valid table from the sidebar.")

            if st.button("Log Out"):
                st.session_state.pop("logged_in", None)
                st.session_state.pop("student_id", None)
                st.session_state.pop("class_id", None)
                st.experimental_rerun()
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
# Script entry point (Streamlit re-executes the module top-level each rerun).
if __name__ == "__main__":
    app5()
|
app5_selectbox/QuartzoBold-W9lv.ttf
ADDED
|
Binary file (121 kB). View file
|
|
|
app5_selectbox/__pycache__/academic_list.cpython-310.pyc
ADDED
|
Binary file (1.37 kB). View file
|
|
|
app5_selectbox/__pycache__/academic_list.cpython-39.pyc
ADDED
|
Binary file (1.38 kB). View file
|
|
|
app5_selectbox/__pycache__/app5_selectbox_func.cpython-310.pyc
ADDED
|
Binary file (1.38 kB). View file
|
|
|
app5_selectbox/__pycache__/app5_selectbox_func.cpython-39.pyc
ADDED
|
Binary file (1.42 kB). View file
|
|
|
app5_selectbox/__pycache__/class_tbl.cpython-310.pyc
ADDED
|
Binary file (2.45 kB). View file
|
|
|
app5_selectbox/__pycache__/class_tbl.cpython-39.pyc
ADDED
|
Binary file (2.46 kB). View file
|
|
|
app5_selectbox/__pycache__/database.cpython-39.pyc
ADDED
|
Binary file (661 Bytes). View file
|
|
|
app5_selectbox/__pycache__/database_con.cpython-310.pyc
ADDED
|
Binary file (312 Bytes). View file
|
|
|
app5_selectbox/__pycache__/database_con.cpython-39.pyc
ADDED
|
Binary file (351 Bytes). View file
|
|
|
app5_selectbox/__pycache__/db_con.cpython-39.pyc
ADDED
|
Binary file (429 Bytes). View file
|
|
|
app5_selectbox/__pycache__/df4_sentiment_analysis.cpython-39.pyc
ADDED
|
Binary file (1.81 kB). View file
|
|
|
app5_selectbox/__pycache__/evaluation.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
app5_selectbox/__pycache__/evaluation.cpython-39.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
app5_selectbox/__pycache__/evaluation_analysis.cpython-310.pyc
ADDED
|
Binary file (8.31 kB). View file
|
|
|
app5_selectbox/__pycache__/evaluation_analysis.cpython-39.pyc
ADDED
|
Binary file (5.4 kB). View file
|
|
|
app5_selectbox/__pycache__/evaluation_analysis_g4f.cpython-39.pyc
ADDED
|
Binary file (5.29 kB). View file
|
|
|
app5_selectbox/__pycache__/evaluation_fac.cpython-310.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|
app5_selectbox/__pycache__/g4f_prompt.cpython-310.pyc
ADDED
|
Binary file (520 Bytes). View file
|
|
|
app5_selectbox/__pycache__/g4f_prompt.cpython-39.pyc
ADDED
|
Binary file (538 Bytes). View file
|
|
|
app5_selectbox/__pycache__/instructor.cpython-310.pyc
ADDED
|
Binary file (2.31 kB). View file
|
|
|
app5_selectbox/__pycache__/instructor.cpython-39.pyc
ADDED
|
Binary file (2.32 kB). View file
|
|
|
app5_selectbox/__pycache__/langchain_llama_gpu.cpython-39.pyc
ADDED
|
Binary file (1.8 kB). View file
|
|
|
app5_selectbox/__pycache__/llama2_prompt.cpython-310.pyc
ADDED
|
Binary file (624 Bytes). View file
|
|
|
app5_selectbox/__pycache__/load_llama2.cpython-310.pyc
ADDED
|
Binary file (709 Bytes). View file
|
|
|
app5_selectbox/__pycache__/naive_bayes_cl.cpython-310.pyc
ADDED
|
Binary file (2.18 kB). View file
|
|
|
app5_selectbox/__pycache__/program.cpython-310.pyc
ADDED
|
Binary file (1.3 kB). View file
|
|
|
app5_selectbox/__pycache__/program.cpython-39.pyc
ADDED
|
Binary file (1.31 kB). View file
|
|
|
app5_selectbox/__pycache__/student.cpython-310.pyc
ADDED
|
Binary file (4.11 kB). View file
|
|
|
app5_selectbox/__pycache__/student.cpython-39.pyc
ADDED
|
Binary file (4.13 kB). View file
|
|
|
app5_selectbox/__pycache__/subj_inst.cpython-310.pyc
ADDED
|
Binary file (5.65 kB). View file
|
|
|
app5_selectbox/__pycache__/subj_inst.cpython-39.pyc
ADDED
|
Binary file (5.83 kB). View file
|
|
|
app5_selectbox/__pycache__/subject.cpython-310.pyc
ADDED
|
Binary file (2.63 kB). View file
|
|
|
app5_selectbox/__pycache__/subject.cpython-39.pyc
ADDED
|
Binary file (2.69 kB). View file
|
|
|
app5_selectbox/academic_list.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# academic_list.py
|
| 2 |
+
import streamlit as st
|
| 3 |
+
from app5_selectbox.database_con import cursor, db_connection #connect_to_database, execute_query
|
| 4 |
+
from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def academic_list(table_name):
    """Admin screen: insert a new academic_list row and render the table.

    *table_name* is the target table name ("academic_list"), forwarded to
    the generic helpers.
    """
    # Pre-generate a unique 4-digit primary key for the new row.
    acad_id = generate_unique_4(cursor, "acad_id", table_name)
    acad_year = st.text_input("Academic Year", key="acad_year")
    sem_num = st.selectbox("Semester Number", ("1", "2"), key="sem_num")

    if st.button("Insert Academic List Record"):
        # Both fields are mandatory.
        if not acad_year or not sem_num:
            st.error("Academic Year and Semester Number are required. Please provide values for both fields.")
        else:
            try:
                # Bug fix: the connection is sqlite3 (see database_con.py),
                # whose paramstyle is '?'.  The original '%s' placeholders are
                # MySQL-style and raise sqlite3.OperationalError here.
                cursor.execute("SELECT acad_id FROM academic_list WHERE acad_year = ? AND sem_num = ?",
                               (acad_year, sem_num))
                duplicate = cursor.fetchone()
                if duplicate is not None:
                    st.error("Duplicate entry found. Please provide unique Academic Year and Semester Number.")
                else:
                    cursor.execute("INSERT INTO academic_list (acad_id, acad_year, sem_num) VALUES (?, ?, ?)",
                                   (acad_id, acad_year, sem_num))
                    db_connection.commit()
                    st.success("Record inserted successfully.")
            except Exception as e:
                st.error(f"An error occurred: {str(e)}")
    display_table(cursor, table_name)
|
app5_selectbox/app5_selectbox_func.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import streamlit as st
|
| 3 |
+
import time
|
| 4 |
+
import pandas as pd
|
| 5 |
+
|
| 6 |
+
def generate_unique_4(cursor, col_id, tblname):
    """Return a random 4-digit id not already present in ``tblname.col_id``.

    *col_id* and *tblname* are identifiers chosen by the application itself
    (identifiers cannot be bound as SQL parameters), while the candidate id
    is bound as a parameter rather than interpolated into the SQL text.
    Loops until an unused id is found.
    """
    while True:
        candidate = random.randint(1000, 9999)
        cursor.execute(f"SELECT {col_id} FROM {tblname} WHERE {col_id} = ?", (candidate,))
        if cursor.fetchone() is None:
            return candidate
|
| 13 |
+
|
| 14 |
+
def display_table(cursor, table_name):
    """Render every row of *table_name* as a centered Streamlit dataframe.

    Warns when the table is empty; surfaces any database error as a
    Streamlit error message instead of raising.
    """
    try:
        # Column names via SQLite's table_info pragma (index 1 = name).
        cursor.execute(f"pragma table_info('{table_name}')")
        headers = [info[1] for info in cursor.fetchall()]

        cursor.execute(f"SELECT * FROM {table_name}")
        rows = cursor.fetchall()

        if rows:
            frame = pd.DataFrame(rows, columns=headers)
            st.header(f"{table_name} Table")
            st.dataframe(frame.style.set_properties(**{'text-align': 'center'}))
        else:
            st.warning(f"No data found in the {table_name} table.")

    except Exception as e:
        st.error(f"An error occurred while fetching data from {table_name}: {str(e)}")
|
app5_selectbox/class_tbl.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# class.py
|
| 2 |
+
import streamlit as st
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from app5_selectbox.database_con import cursor, db_connection
|
| 5 |
+
from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# In the display_table function, fetch and display prog_code
|
| 9 |
+
def display_class_table(cursor, table_name):
    """Render the class table joined with program so prog_code is visible."""
    # Guard clause: this helper only knows how to render the "class" table.
    if table_name != "class":
        return
    cursor.execute("SELECT class.class_id, class.prog_id, program.prog_code, class.class_year, class.class_section FROM class INNER JOIN program ON class.prog_id = program.prog_id")
    rows = cursor.fetchall()
    headers = [col[0] for col in cursor.description]
    frame = pd.DataFrame(rows, columns=headers)
    st.dataframe(frame.style.set_properties(**{'text-align': 'center'}))
|
| 16 |
+
|
| 17 |
+
def class_tbl(table_name):
    """Admin screen: insert a class row (program/year/section) and list classes."""
    # Pre-generate a unique 4-digit primary key for the new row.
    class_id = generate_unique_4(cursor, "class_id", table_name)

    # Offer the existing programs as the selectable Program ID options.
    cursor.execute("SELECT prog_id, prog_name, prog_code FROM program")
    available_programs = cursor.fetchall()
    prog_id = st.selectbox("Program ID", available_programs, format_func=lambda row: f"{row[1]} ({row[2]})", key="prog_id")[0]
    class_year = st.selectbox("Class Year", ("1", "2", "3", "4"), key="class_year")
    class_section = st.text_input("Class Section", key="class_section", max_chars=1).upper()

    if st.button("Insert Class Record"):
        # Both year and section are mandatory.
        if not class_year or not class_section:
            st.error("Class Year and Class Section are required. Please provide values for both fields.")
        else:
            try:
                # Bug fix: the connection is sqlite3 (see database_con.py),
                # whose paramstyle is '?'.  The original '%s' placeholders are
                # MySQL-style and raise sqlite3.OperationalError here.
                cursor.execute("SELECT class_id FROM class WHERE prog_id = ? AND class_year = ? AND class_section = ?",
                               (prog_id, class_year, class_section))
                result = cursor.fetchone()
                if result is not None:
                    st.error("A record with the same Program ID, Class Year, and Class Section already exists.")
                else:
                    cursor.execute("INSERT INTO class (class_id, prog_id, class_year, class_section) VALUES (?, ?, ?, ?)",
                                   (class_id, prog_id, class_year, class_section))
                    db_connection.commit()
                    st.success("Record inserted successfully.")
            except Exception as e:
                st.error(f"An error occurred: {str(e)}")
    display_class_table(cursor, table_name)
|
app5_selectbox/database_con.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# # database.py
|
| 2 |
+
# import mysql.connector
|
| 3 |
+
|
| 4 |
+
# # Connect to your MySQL database
|
| 5 |
+
# db_connection = mysql.connector.connect(
|
| 6 |
+
# host="localhost",
|
| 7 |
+
# user="root",
|
| 8 |
+
# password="",
|
| 9 |
+
# database="university_evaluation_5"
|
| 10 |
+
# )
|
| 11 |
+
|
| 12 |
+
# cursor = db_connection.cursor()
|
| 13 |
+
|
| 14 |
+
#### for sqlite connection ####
# Module-level SQLite connection shared by the whole app (imported as
# `cursor` / `db_connection` elsewhere).
#
# NOTE(review): the database path is an absolute, machine-specific path —
# this breaks on any other host; consider reading it from configuration.
# check_same_thread=False allows Streamlit's worker threads to share this
# single connection; no explicit locking is done here.

import sqlite3
db_connection = sqlite3.connect('/home/aibo/prototype_v1/prototype/database/data.sqlite', check_same_thread=False)
cursor = db_connection.cursor()
|
app5_selectbox/df4_sentiment_analysis.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import g4f
|
| 2 |
+
import time
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def sentiment_func(message_list):
    """Label each sentence in *message_list* as positive/negative via g4f.

    Builds one prompt containing all sentences, asks the model for a
    comma-separated answer, and returns a list of lowercase labels in the
    same order as the input.  Retries indefinitely on provider errors.

    Bug fix: a leftover debug block overwrote *message_list* with a
    hard-coded sample list at the top of the function, so the argument was
    silently ignored; that override has been removed.  The unreachable
    ``break`` after ``return`` is gone as well.
    """
    # Join sentences with a marker so the model sees one item per line.
    joined = '[label]\n'.join(message_list)
    prompt = f"""
    Please provide a single-word response per sentence.
    label the following sentences if it is positive,negative
    sentence list = {joined}
    your output is should in comma separated
    example output : positive,negative,negative,positive
    """

    print(prompt)
    while True:
        try:
            # Streamed completion; provider choice mirrors the original code.
            response = g4f.ChatCompletion.create(
                model="gpt-3.5-turbo",
                provider=g4f.Provider.You,
                messages=[{"role": "user", "content": prompt}],
                stream=True,
            )
            returned_output = ""
            for message in response:
                returned_output += message
            # Trim extra white space and normalize to lowercase labels.
            return [item.strip().lower() for item in returned_output.split(',')]

        except Exception as e:
            # Providers fail transiently; log and retry after a short pause.
            print("error....", e)
            time.sleep(0.4)
|
app5_selectbox/evaluation copy 2.py
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# evaluation.py
|
| 2 |
+
import streamlit as st
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from app5_selectbox.database_con import cursor, db_connection
|
| 5 |
+
from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4
|
| 6 |
+
from app5_selectbox.evaluation_analysis import eval_analysis
|
| 7 |
+
|
| 8 |
+
import matplotlib.pyplot as plt
|
| 9 |
+
import seaborn as sns
|
| 10 |
+
import plotly.express as px
|
| 11 |
+
import plotly.graph_objs as go
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Function to perform analytics on instructors
|
| 16 |
+
def analyze_instructors(cursor):
|
| 17 |
+
try:
|
| 18 |
+
# Execute the SQL query to fetch the evaluation data
|
| 19 |
+
cursor.execute("SELECT * FROM evaluation")
|
| 20 |
+
evaluation_data = cursor.fetchall()
|
| 21 |
+
|
| 22 |
+
if not evaluation_data:
|
| 23 |
+
st.warning("No evaluation data found.")
|
| 24 |
+
else:
|
| 25 |
+
# Create a DataFrame from the fetched data and set column names
|
| 26 |
+
column_names = [i[0].replace("_", " ") for i in cursor.description]
|
| 27 |
+
df = pd.DataFrame(evaluation_data, columns=column_names)
|
| 28 |
+
|
| 29 |
+
# Get the column names for the score criteria
|
| 30 |
+
criteria_columns = [f"score_criteria_{i}" for i in range(10)]
|
| 31 |
+
column_names = [column[0].replace("_", " ") for column in cursor.description][4:14]
|
| 32 |
+
# Define criteria labels globally
|
| 33 |
+
criteria_labels = [(f"{column_names[i]}", f"{column_names[i]}".replace("_", " ")) for i in range(10)]
|
| 34 |
+
|
| 35 |
+
instructor_avg_scores = df.groupby("inst id")[column_names].mean().reset_index()
|
| 36 |
+
|
| 37 |
+
cursor.execute("SELECT inst_id, inst_name FROM instructor")
|
| 38 |
+
instructor_data = cursor.fetchall()
|
| 39 |
+
instructor_df = pd.DataFrame(instructor_data, columns=["inst id", "instructor name"])
|
| 40 |
+
instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst id", how="left")
|
| 41 |
+
|
| 42 |
+
selected_instructor = st.selectbox("Select Instructor", instructor_avg_scores["instructor name"].unique())
|
| 43 |
+
|
| 44 |
+
filtered_data = df[df["inst id"] == instructor_avg_scores[instructor_avg_scores["instructor name"] == selected_instructor]["inst id"].values[0]]
|
| 45 |
+
|
| 46 |
+
selected_instructor_comments = list(filtered_data["comments"])
|
| 47 |
+
st.subheader(f"Evaluated by: {len(selected_instructor_comments)} students")
|
| 48 |
+
|
| 49 |
+
cursor.execute("""
|
| 50 |
+
SELECT subj_inst.subj_inst_id, subject.sub_name
|
| 51 |
+
FROM subj_inst
|
| 52 |
+
INNER JOIN subject
|
| 53 |
+
ON subj_inst.sub_id_code = subject.sub_id_code
|
| 54 |
+
""")
|
| 55 |
+
|
| 56 |
+
subject_data = cursor.fetchall()
|
| 57 |
+
subject_df = pd.DataFrame(subject_data, columns=["subj inst id", "sub name"])
|
| 58 |
+
filtered_data = filtered_data.merge(subject_df, on="subj inst id", how="left")
|
| 59 |
+
|
| 60 |
+
subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()
|
| 61 |
+
|
| 62 |
+
subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
fig = go.Figure()
|
| 66 |
+
|
| 67 |
+
# for criterion, label in [("score_criteria_1", "Criteria 1"), ("score_criteria_2", "Criteria 2"), ("score_criteria_3", "Criteria 3")]:
|
| 68 |
+
# fig.add_trace(go.Bar(
|
| 69 |
+
# x=subject_avg_scores["sub_name"],
|
| 70 |
+
# y=subject_avg_scores[criterion],
|
| 71 |
+
# name=label,
|
| 72 |
+
# ))
|
| 73 |
+
|
| 74 |
+
criteria_labels = [(f"{column_names[i]}", f"{column_names[i]}".replace("_", " ")) for i in range(10)]
|
| 75 |
+
for criterion, label in criteria_labels:
|
| 76 |
+
fig.add_trace(go.Bar(
|
| 77 |
+
x=subject_avg_scores["sub name"],
|
| 78 |
+
y=subject_avg_scores[criterion],
|
| 79 |
+
name=label,
|
| 80 |
+
))
|
| 81 |
+
|
| 82 |
+
# Add the total average score above the bars
|
| 83 |
+
fig.add_trace(go.Scatter(
|
| 84 |
+
x=subject_avg_scores["sub name"],
|
| 85 |
+
y=subject_avg_scores["total average"],
|
| 86 |
+
mode="markers+text",
|
| 87 |
+
text=round(subject_avg_scores["total average"],2),
|
| 88 |
+
textposition="top center",
|
| 89 |
+
textfont=dict(size=14),
|
| 90 |
+
marker=dict(size=10, color="black"),
|
| 91 |
+
name="Total Average",
|
| 92 |
+
))
|
| 93 |
+
|
| 94 |
+
# Display the overall average of all subjects
|
| 95 |
+
overall_average = subject_avg_scores["total average"].mean()
|
| 96 |
+
# st.write(f"Overall Average Score (All Subjects): {overall_average:.2f}")
|
| 97 |
+
fig.update_layout(
|
| 98 |
+
barmode="group",
|
| 99 |
+
title=f"Average Scores per Criteria by Subject for Instructor: {selected_instructor}",
|
| 100 |
+
xaxis_title=f"Overall Average Score (All Subjects): {overall_average:.2f}",
|
| 101 |
+
yaxis_title="Average Score",
|
| 102 |
+
)
|
| 103 |
+
st.plotly_chart(fig)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
# st.write("**Average score per Criteria**")
|
| 108 |
+
results_to_prompt = "Average score per Criteria\n"
|
| 109 |
+
criteria_averages = []
|
| 110 |
+
for criteria in filtered_data.columns[4:14]:
|
| 111 |
+
average_score = round(sum(filtered_data[criteria] / len(filtered_data)), 2)
|
| 112 |
+
criteria_averages.append((criteria, average_score))
|
| 113 |
+
results_to_prompt += f"{criteria}: {average_score}/5, \n"
|
| 114 |
+
# print(results_to_prompt)
|
| 115 |
+
|
| 116 |
+
# st.write(results_to_prompt)
|
| 117 |
+
# # Create a Plotly bar chart
|
| 118 |
+
fig = go.Figure()
|
| 119 |
+
fig.add_trace(go.Bar(
|
| 120 |
+
x=[criteria for criteria, _ in criteria_averages],
|
| 121 |
+
y=[score for _, score in criteria_averages],
|
| 122 |
+
text=[f"{score}/5" for _, score in criteria_averages],
|
| 123 |
+
# textposition='outside',
|
| 124 |
+
))
|
| 125 |
+
|
| 126 |
+
fig.update_layout(
|
| 127 |
+
title="Average Score per Criteria",
|
| 128 |
+
xaxis_title="Criteria",
|
| 129 |
+
yaxis_title="Average Score",
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
st.plotly_chart(fig)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
for subject in subject_avg_scores["sub name"]:
|
| 144 |
+
subject_filtered_data = filtered_data[filtered_data["sub name"] == subject]
|
| 145 |
+
|
| 146 |
+
fig = go.Figure()
|
| 147 |
+
st.write(subject_filtered_data)
|
| 148 |
+
for criterion, label in criteria_labels:
|
| 149 |
+
fig.add_trace(go.Bar(
|
| 150 |
+
x=[label],
|
| 151 |
+
y=[subject_filtered_data[criterion].mean()],
|
| 152 |
+
text=[subject_filtered_data[criterion].mean()],
|
| 153 |
+
name=label,
|
| 154 |
+
))
|
| 155 |
+
|
| 156 |
+
# Calculate the "total average" based on criteria columns
|
| 157 |
+
total_average = subject_filtered_data[column_names].mean(axis=1).mean()
|
| 158 |
+
|
| 159 |
+
# # dot point for Total Average"
|
| 160 |
+
# fig.add_trace(go.Scatter(
|
| 161 |
+
# x=[label],
|
| 162 |
+
# y=[total_average],
|
| 163 |
+
# mode="markers+text",
|
| 164 |
+
# text=[round(total_average, 2)],
|
| 165 |
+
# textposition="top center",
|
| 166 |
+
# textfont=dict(size=14),
|
| 167 |
+
# marker=dict(size=10, color="black"),
|
| 168 |
+
# name="Total Average",
|
| 169 |
+
# ))
|
| 170 |
+
|
| 171 |
+
fig.update_layout(
|
| 172 |
+
barmode="group",
|
| 173 |
+
title=f"{subject} Average Score: {total_average:.2f}",
|
| 174 |
+
# xaxis_title=f"Overall Average Score: {total_average:.2f}",
|
| 175 |
+
yaxis_title="Average Score",
|
| 176 |
+
)
|
| 177 |
+
st.plotly_chart(fig)
|
| 178 |
+
|
| 179 |
+
# selected_instructor_comments.append(results_to_prompt)
|
| 180 |
+
# st.write(selected_instructor_comments)
|
| 181 |
+
return selected_instructor, selected_instructor_comments, results_to_prompt
|
| 182 |
+
|
| 183 |
+
except Exception as e:
|
| 184 |
+
st.error(f"An error occurred during data analytics: {str(e)}")
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
# try:
|
| 188 |
+
# # Execute the SQL query to fetch the evaluation data
|
| 189 |
+
# cursor.execute("SELECT * FROM evaluation")
|
| 190 |
+
# evaluation_data = cursor.fetchall()
|
| 191 |
+
|
| 192 |
+
# if not evaluation_data:
|
| 193 |
+
# st.warning("No evaluation data found.")
|
| 194 |
+
# else:
|
| 195 |
+
# # Create a DataFrame from the fetched data and set column names
|
| 196 |
+
# column_names = [i[0] for i in cursor.description]
|
| 197 |
+
# df = pd.DataFrame(evaluation_data, columns=column_names)
|
| 198 |
+
|
| 199 |
+
# # Group data by instructor and calculate average scores per criteria
|
| 200 |
+
# instructor_avg_scores = df.groupby("inst_id").agg({
|
| 201 |
+
# "score_criteria_1": "mean",
|
| 202 |
+
# "score_criteria_2": "mean",
|
| 203 |
+
# "score_criteria_3": "mean"
|
| 204 |
+
# }).reset_index()
|
| 205 |
+
|
| 206 |
+
# # Join with instructor data to get their names
|
| 207 |
+
# cursor.execute("SELECT inst_id, inst_name FROM instructor")
|
| 208 |
+
# instructor_data = cursor.fetchall()
|
| 209 |
+
# instructor_df = pd.DataFrame(instructor_data, columns=["inst_id", "instructor_name"])
|
| 210 |
+
# instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst_id", how="left")
|
| 211 |
+
|
| 212 |
+
# # Join with subj_inst and subject tables to get subject names
|
| 213 |
+
# cursor.execute("SELECT si.subj_inst_id, s.sub_name FROM subj_inst si INNER JOIN subject s ON si.sub_id_code = s.sub_id_code")
|
| 214 |
+
# subject_data = cursor.fetchall()
|
| 215 |
+
# subject_df = pd.DataFrame(subject_data, columns=["subj_inst_id", "sub_name"])
|
| 216 |
+
# df = df.merge(subject_df, on="subj_inst_id", how="left")
|
| 217 |
+
|
| 218 |
+
# # Create a select box to filter by instructor and subject
|
| 219 |
+
# selected_instructor = st.selectbox("Select Instructor", instructor_avg_scores["instructor_name"].unique())
|
| 220 |
+
# selected_subjects = df[df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor_name"] == selected_instructor]["inst_id"].values[0]]["sub_name"].unique()
|
| 221 |
+
# selected_subject = st.selectbox("Select Subject", selected_subjects)
|
| 222 |
+
|
| 223 |
+
# # Filter data based on the selected instructor and subject
|
| 224 |
+
# filtered_data = df[(df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor_name"] == selected_instructor]["inst_id"].values[0]) &
|
| 225 |
+
# (df["sub_name"] == selected_subject)]
|
| 226 |
+
|
| 227 |
+
# # Create a bar chart for average scores per criteria
|
| 228 |
+
# fig = px.bar(instructor_avg_scores, x="instructor_name",
|
| 229 |
+
# y=["score_criteria_1", "score_criteria_2", "score_criteria_3"],
|
| 230 |
+
# labels={"value": "Average Score", "variable": "Criteria"},
|
| 231 |
+
# title="Average Scores per Criteria by Instructor")
|
| 232 |
+
# st.plotly_chart(fig)
|
| 233 |
+
|
| 234 |
+
# # Group data by subject instructor and calculate average scores
|
| 235 |
+
# subject_avg_scores = filtered_data.groupby("sub_name").agg({
|
| 236 |
+
# "score_criteria_1": "mean",
|
| 237 |
+
# "score_criteria_2": "mean",
|
| 238 |
+
# "score_criteria_3": "mean"
|
| 239 |
+
# }).reset_index()
|
| 240 |
+
|
| 241 |
+
# # Create a bar chart for average scores per criteria for the selected subject
|
| 242 |
+
# fig = px.bar(subject_avg_scores, x="sub_name",
|
| 243 |
+
# y=["score_criteria_1", "score_criteria_2", "score_criteria_3"],
|
| 244 |
+
# labels={"value": "Average Score", "variable": "Criteria"},
|
| 245 |
+
# title=f"Average Scores per Criteria for Subject {selected_subject}")
|
| 246 |
+
# st.plotly_chart(fig)
|
| 247 |
+
|
| 248 |
+
# except Exception as e:
|
| 249 |
+
# st.error(f"An error occurred during data analytics: {str(e)}")
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def evaluation(cursor, table_name):
|
| 257 |
+
try:
|
| 258 |
+
# Execute the SQL query to fetch the evaluation data
|
| 259 |
+
cursor.execute("SELECT * FROM evaluation")
|
| 260 |
+
evaluation_data = cursor.fetchall()
|
| 261 |
+
|
| 262 |
+
if not evaluation_data:
|
| 263 |
+
st.warning("No evaluation data found.")
|
| 264 |
+
else:
|
| 265 |
+
# Create a DataFrame from the fetched data and set column names
|
| 266 |
+
column_names = [i[0] for i in cursor.description]
|
| 267 |
+
df = pd.DataFrame(evaluation_data, columns=column_names)
|
| 268 |
+
|
| 269 |
+
# # Display the table with centered text
|
| 270 |
+
# st.header(f"{table_name} Table")
|
| 271 |
+
# st.dataframe(df.style.set_properties(**{'text-align': 'center'}))
|
| 272 |
+
|
| 273 |
+
analyze_instructors_results = analyze_instructors(cursor)
|
| 274 |
+
|
| 275 |
+
if st.button("Analyze comments"):
|
| 276 |
+
# st.write(analyze_instructors_results[0], analyze_instructors_results[1])
|
| 277 |
+
eval_analysis(analyze_instructors_results[0], analyze_instructors_results[1], analyze_instructors_results[2])
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
except Exception as e:
|
| 281 |
+
st.error(f"An error occurred while fetching evaluation data: {str(e)}")
|
app5_selectbox/evaluation copy.py
ADDED
|
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# evaluation.py
|
| 2 |
+
import streamlit as st
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from app5_selectbox.database_con import cursor, db_connection
|
| 5 |
+
from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4
|
| 6 |
+
from app5_selectbox.evaluation_analysis import eval_analysis
|
| 7 |
+
|
| 8 |
+
import matplotlib.pyplot as plt
|
| 9 |
+
import seaborn as sns
|
| 10 |
+
import plotly.express as px
|
| 11 |
+
import plotly.graph_objs as go
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Function to perform analytics on instructors
|
| 16 |
+
def analyze_instructors(cursor):
|
| 17 |
+
try:
|
| 18 |
+
# Execute the SQL query to fetch the evaluation data
|
| 19 |
+
cursor.execute("SELECT * FROM evaluation")
|
| 20 |
+
evaluation_data = cursor.fetchall()
|
| 21 |
+
|
| 22 |
+
if not evaluation_data:
|
| 23 |
+
st.warning("No evaluation data found.")
|
| 24 |
+
else:
|
| 25 |
+
# Create a DataFrame from the fetched data and set column names
|
| 26 |
+
column_names = [i[0].replace("_"," ") for i in cursor.description]
|
| 27 |
+
# for i in range(len(column_names)):
|
| 28 |
+
# column_names[i] = column_names[i].replace("_"," ")
|
| 29 |
+
# st.write(column_names)
|
| 30 |
+
# .replace("_"," ")
|
| 31 |
+
df = pd.DataFrame(evaluation_data, columns=column_names)
|
| 32 |
+
|
| 33 |
+
# # Group data by instructor and calculate average scores per criteria
|
| 34 |
+
# instructor_avg_scores = df.groupby("inst_id").agg({
|
| 35 |
+
# "score_criteria_1": "mean",
|
| 36 |
+
# "score_criteria_2": "mean",
|
| 37 |
+
# "score_criteria_3": "mean"
|
| 38 |
+
# }).reset_index()
|
| 39 |
+
# Get the column names from the cursor description
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
criteria_columns = [f"score_criteria_{i}" for i in range(10)]
|
| 44 |
+
|
| 45 |
+
column_names = [column[0].replace("_"," ") for column in cursor.description][4:14]
|
| 46 |
+
# Print the column names
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
instructor_avg_scores = df.groupby("inst id")[column_names].mean().reset_index()
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# Join with instructor data to get their names
|
| 53 |
+
cursor.execute("SELECT inst_id, inst_name FROM instructor")
|
| 54 |
+
instructor_data = cursor.fetchall()
|
| 55 |
+
instructor_df = pd.DataFrame(instructor_data, columns=["inst id", "instructor name"])
|
| 56 |
+
instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst id", how="left")
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# Create a select box to filter by instructor
|
| 62 |
+
selected_instructor = st.selectbox("Select Instructor", instructor_avg_scores["instructor name"].unique())
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# Filter data based on the selected instructor
|
| 67 |
+
filtered_data = df[df["inst id"] == instructor_avg_scores[instructor_avg_scores["instructor name"] == selected_instructor]["inst id"].values[0]]
|
| 68 |
+
# st.write(filtered_data[filtered_data.columns[4:15]])
|
| 69 |
+
|
| 70 |
+
# st.write(selected_instructor)
|
| 71 |
+
selected_instructor_comments = list(filtered_data["comments"])
|
| 72 |
+
# st.write(selected_instructor_comments) #get all comments fro the instructor
|
| 73 |
+
st.subheader(f"Evaluated by: {len(selected_instructor_comments)} students")
|
| 74 |
+
|
| 75 |
+
# Join with the subj_inst and subject tables to get subject names
|
| 76 |
+
cursor.execute("""
|
| 77 |
+
SELECT subj_inst.subj_inst_id, subject.sub_name
|
| 78 |
+
FROM subj_inst
|
| 79 |
+
INNER JOIN subject
|
| 80 |
+
ON subj_inst.sub_id_code = subject.sub_id_code
|
| 81 |
+
""")
|
| 82 |
+
|
| 83 |
+
subject_data = cursor.fetchall()
|
| 84 |
+
subject_df = pd.DataFrame(subject_data, columns=["subj inst id", "sub name"])
|
| 85 |
+
filtered_data = filtered_data.merge(subject_df, on="subj inst id", how="left")
|
| 86 |
+
|
| 87 |
+
# # Group data by subject and calculate average scores per criteria
|
| 88 |
+
# subject_avg_scores = filtered_data.groupby("sub_name").agg({
|
| 89 |
+
# "score_criteria_1": "mean",
|
| 90 |
+
# "score_criteria_2": "mean",
|
| 91 |
+
# "score_criteria_3": "mean"
|
| 92 |
+
# }).reset_index()
|
| 93 |
+
|
| 94 |
+
# criteria_columns = [f"score_criteria_{i}" for i in range(10)]
|
| 95 |
+
subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# # Calculate the total average score for each subject
|
| 99 |
+
# subject_avg_scores["total_average"] = subject_avg_scores[["score_criteria_1", "score_criteria_2", "score_criteria_3"]].mean(axis=1)
|
| 100 |
+
# criteria_columns = [f"score_criteria_{i}" for i in range(10)]
|
| 101 |
+
|
| 102 |
+
subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# Create a grouped bar chart for average scores per criteria by subject
|
| 107 |
+
fig = go.Figure()
|
| 108 |
+
|
| 109 |
+
# for criterion, label in [("score_criteria_1", "Criteria 1"), ("score_criteria_2", "Criteria 2"), ("score_criteria_3", "Criteria 3")]:
|
| 110 |
+
# fig.add_trace(go.Bar(
|
| 111 |
+
# x=subject_avg_scores["sub_name"],
|
| 112 |
+
# y=subject_avg_scores[criterion],
|
| 113 |
+
# name=label,
|
| 114 |
+
# ))
|
| 115 |
+
|
| 116 |
+
criteria_labels = [(f"{column_names[i]}", f"{column_names[i]}".replace("_", " ")) for i in range(10)]
|
| 117 |
+
for criterion, label in criteria_labels:
|
| 118 |
+
fig.add_trace(go.Bar(
|
| 119 |
+
x=subject_avg_scores["sub name"],
|
| 120 |
+
y=subject_avg_scores[criterion],
|
| 121 |
+
name=label,
|
| 122 |
+
))
|
| 123 |
+
|
| 124 |
+
# Add the total average score above the bars
|
| 125 |
+
fig.add_trace(go.Scatter(
|
| 126 |
+
x=subject_avg_scores["sub name"],
|
| 127 |
+
y=subject_avg_scores["total average"],
|
| 128 |
+
mode="markers+text",
|
| 129 |
+
text=round(subject_avg_scores["total average"],2),
|
| 130 |
+
textposition="top center",
|
| 131 |
+
textfont=dict(size=14),
|
| 132 |
+
marker=dict(size=10, color="black"),
|
| 133 |
+
name="Total Average",
|
| 134 |
+
))
|
| 135 |
+
|
| 136 |
+
# Display the overall average of all subjects
|
| 137 |
+
overall_average = subject_avg_scores["total average"].mean()
|
| 138 |
+
# st.write(f"Overall Average Score (All Subjects): {overall_average:.2f}")
|
| 139 |
+
fig.update_layout(
|
| 140 |
+
barmode="group",
|
| 141 |
+
title=f"Average Scores per Criteria by Subject for Instructor: {selected_instructor}",
|
| 142 |
+
xaxis_title=f"Overall Average Score (All Subjects): {overall_average:.2f}",
|
| 143 |
+
yaxis_title="Average Score",
|
| 144 |
+
)
|
| 145 |
+
st.plotly_chart(fig)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
return selected_instructor, selected_instructor_comments
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
except Exception as e:
|
| 153 |
+
st.error(f"An error occurred during data analytics: {str(e)}")
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# try:
|
| 157 |
+
# # Execute the SQL query to fetch the evaluation data
|
| 158 |
+
# cursor.execute("SELECT * FROM evaluation")
|
| 159 |
+
# evaluation_data = cursor.fetchall()
|
| 160 |
+
|
| 161 |
+
# if not evaluation_data:
|
| 162 |
+
# st.warning("No evaluation data found.")
|
| 163 |
+
# else:
|
| 164 |
+
# # Create a DataFrame from the fetched data and set column names
|
| 165 |
+
# column_names = [i[0] for i in cursor.description]
|
| 166 |
+
# df = pd.DataFrame(evaluation_data, columns=column_names)
|
| 167 |
+
|
| 168 |
+
# # Group data by instructor and calculate average scores per criteria
|
| 169 |
+
# instructor_avg_scores = df.groupby("inst_id").agg({
|
| 170 |
+
# "score_criteria_1": "mean",
|
| 171 |
+
# "score_criteria_2": "mean",
|
| 172 |
+
# "score_criteria_3": "mean"
|
| 173 |
+
# }).reset_index()
|
| 174 |
+
|
| 175 |
+
# # Join with instructor data to get their names
|
| 176 |
+
# cursor.execute("SELECT inst_id, inst_name FROM instructor")
|
| 177 |
+
# instructor_data = cursor.fetchall()
|
| 178 |
+
# instructor_df = pd.DataFrame(instructor_data, columns=["inst_id", "instructor_name"])
|
| 179 |
+
# instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst_id", how="left")
|
| 180 |
+
|
| 181 |
+
# # Join with subj_inst and subject tables to get subject names
|
| 182 |
+
# cursor.execute("SELECT si.subj_inst_id, s.sub_name FROM subj_inst si INNER JOIN subject s ON si.sub_id_code = s.sub_id_code")
|
| 183 |
+
# subject_data = cursor.fetchall()
|
| 184 |
+
# subject_df = pd.DataFrame(subject_data, columns=["subj_inst_id", "sub_name"])
|
| 185 |
+
# df = df.merge(subject_df, on="subj_inst_id", how="left")
|
| 186 |
+
|
| 187 |
+
# # Create a select box to filter by instructor and subject
|
| 188 |
+
# selected_instructor = st.selectbox("Select Instructor", instructor_avg_scores["instructor_name"].unique())
|
| 189 |
+
# selected_subjects = df[df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor_name"] == selected_instructor]["inst_id"].values[0]]["sub_name"].unique()
|
| 190 |
+
# selected_subject = st.selectbox("Select Subject", selected_subjects)
|
| 191 |
+
|
| 192 |
+
# # Filter data based on the selected instructor and subject
|
| 193 |
+
# filtered_data = df[(df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor_name"] == selected_instructor]["inst_id"].values[0]) &
|
| 194 |
+
# (df["sub_name"] == selected_subject)]
|
| 195 |
+
|
| 196 |
+
# # Create a bar chart for average scores per criteria
|
| 197 |
+
# fig = px.bar(instructor_avg_scores, x="instructor_name",
|
| 198 |
+
# y=["score_criteria_1", "score_criteria_2", "score_criteria_3"],
|
| 199 |
+
# labels={"value": "Average Score", "variable": "Criteria"},
|
| 200 |
+
# title="Average Scores per Criteria by Instructor")
|
| 201 |
+
# st.plotly_chart(fig)
|
| 202 |
+
|
| 203 |
+
# # Group data by subject instructor and calculate average scores
|
| 204 |
+
# subject_avg_scores = filtered_data.groupby("sub_name").agg({
|
| 205 |
+
# "score_criteria_1": "mean",
|
| 206 |
+
# "score_criteria_2": "mean",
|
| 207 |
+
# "score_criteria_3": "mean"
|
| 208 |
+
# }).reset_index()
|
| 209 |
+
|
| 210 |
+
# # Create a bar chart for average scores per criteria for the selected subject
|
| 211 |
+
# fig = px.bar(subject_avg_scores, x="sub_name",
|
| 212 |
+
# y=["score_criteria_1", "score_criteria_2", "score_criteria_3"],
|
| 213 |
+
# labels={"value": "Average Score", "variable": "Criteria"},
|
| 214 |
+
# title=f"Average Scores per Criteria for Subject {selected_subject}")
|
| 215 |
+
# st.plotly_chart(fig)
|
| 216 |
+
|
| 217 |
+
# except Exception as e:
|
| 218 |
+
# st.error(f"An error occurred during data analytics: {str(e)}")
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def evaluation(cursor, table_name):
|
| 226 |
+
try:
|
| 227 |
+
# Execute the SQL query to fetch the evaluation data
|
| 228 |
+
cursor.execute("SELECT * FROM evaluation")
|
| 229 |
+
evaluation_data = cursor.fetchall()
|
| 230 |
+
|
| 231 |
+
if not evaluation_data:
|
| 232 |
+
st.warning("No evaluation data found.")
|
| 233 |
+
else:
|
| 234 |
+
# Create a DataFrame from the fetched data and set column names
|
| 235 |
+
column_names = [i[0] for i in cursor.description]
|
| 236 |
+
df = pd.DataFrame(evaluation_data, columns=column_names)
|
| 237 |
+
|
| 238 |
+
# # Display the table with centered text
|
| 239 |
+
# st.header(f"{table_name} Table")
|
| 240 |
+
# st.dataframe(df.style.set_properties(**{'text-align': 'center'}))
|
| 241 |
+
|
| 242 |
+
analyze_instructors_results = analyze_instructors(cursor)
|
| 243 |
+
|
| 244 |
+
if st.button("Analyze comments"):
|
| 245 |
+
# st.write(analyze_instructors_results[0], analyze_instructors_results[1])
|
| 246 |
+
eval_analysis(analyze_instructors_results[0], analyze_instructors_results[1])
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
except Exception as e:
|
| 250 |
+
st.error(f"An error occurred while fetching evaluation data: {str(e)}")
|
app5_selectbox/evaluation.py
ADDED
|
@@ -0,0 +1,412 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import plotly.graph_objs as go
|
| 4 |
+
import time
|
| 5 |
+
import plotly.express as px
|
| 6 |
+
import ast
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
from app5_selectbox.database_con import cursor, db_connection
|
| 11 |
+
from app5_selectbox.app5_selectbox_func import generate_unique_4
|
| 12 |
+
from app5_selectbox.evaluation_analysis import eval_analysis
|
| 13 |
+
# from app5_selectbox.evaluation_analysis_g4f import eval_analysis
|
| 14 |
+
|
| 15 |
+
# from app5_selectbox.langchain_llama_gpu import llm_chain
|
| 16 |
+
from app5_selectbox.g4f_prompt import g4f_prompt
|
| 17 |
+
|
| 18 |
+
# Function to fetch evaluation data
|
| 19 |
+
def fetch_evaluation_data():
|
| 20 |
+
cursor.execute("SELECT * FROM evaluation")
|
| 21 |
+
evaluation_data = cursor.fetchall()
|
| 22 |
+
if not evaluation_data:
|
| 23 |
+
st.warning("No evaluation data found.")
|
| 24 |
+
return None
|
| 25 |
+
column_names = [i[0] for i in cursor.description]
|
| 26 |
+
return pd.DataFrame(evaluation_data, columns=column_names)
|
| 27 |
+
|
| 28 |
+
# Function to analyze instructors
|
| 29 |
+
def analyze_instructors(evaluation_df):
    """Render the per-instructor evaluation dashboard in Streamlit.

    Builds program/instructor select boxes from the database, charts
    per-subject and per-criteria average scores, pie-charts respondent
    sections, tabulates per-class weighted averages, and streams
    LLM-generated insight text via ``g4f_prompt``.

    Parameters:
        evaluation_df: DataFrame of ``evaluation`` rows (as returned by
            ``fetch_evaluation_data``) or ``None``.

    Returns:
        ``(selected_instructor, selected_instructor_comments,
        results_to_prompt)`` for downstream sentiment analysis, or
        ``None`` when ``evaluation_df`` is ``None``.
    """

    if evaluation_df is None:
        return

    # Columns 4..13 are taken to be the ten numeric rating criteria
    # -- assumption based on the slice below; TODO confirm against the
    # evaluation table schema.
    column_names = evaluation_df.columns[4:14]
    # Human-readable labels: underscores become spaces.
    criteria_labels = [column.replace("_", " ") for column in column_names]

    cursor.execute("SELECT * FROM instructor")
    instructor_data = cursor.fetchall()

    # st.write(instructor_data)

    instructor_df = pd.DataFrame(instructor_data, columns=["inst_id", "instructor name","program code","user name","password"])
    # Mean score per instructor for every criteria column.
    instructor_avg_scores = evaluation_df.groupby("inst_id")[column_names].mean().reset_index()
    instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst_id", how="left")

    # st.write(instructor_avg_scores)
    # programs_list = sorted(instructor_avg_scores["program code"].unique())

    # Fetch program options from the program table
    cursor.execute("SELECT prog_id, prog_code, prog_name FROM program")
    selected_program = pd.DataFrame(cursor.fetchall(), columns=["prog_id", "prog_code", "prog_name"])
    # st.write(selected_program)
    # st.write(list({str(prog): prog[0] for prog in program_options}))
    selected_program_select = st.selectbox("Select Program", selected_program["prog_code"])
    # selected_program = ast.literal_eval(str(selected_program))

    # selected_program = st.selectbox("Select Program", programs_list)
    filtered_instructor_list = pd.DataFrame(instructor_avg_scores)
    # st.write(filtered_instructor_list)
    # Keep only instructors whose "program code" matches the prog_id of
    # the selected prog_code.
    mask = filtered_instructor_list["program code"] == selected_program.loc[selected_program['prog_code'] == selected_program_select, 'prog_id'].values[0]
    # st.write(mask)
    filtered_instructor_list = filtered_instructor_list.loc[mask]

    # st.write(filtered_instructor_list)
    instructors_list = sorted(filtered_instructor_list["instructor name"].unique())
    # print(type(instructor_avg_scores))

    # instructors_list = instructor_avg_scores.query("program code == {selected_program}")
    # st.write(len(instructors_list)) # df to graph

    selected_instructor = st.selectbox("Select Instructor", instructors_list)

    # All evaluation rows belonging to the chosen instructor.
    filtered_data = evaluation_df[evaluation_df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor name"] == selected_instructor]["inst_id"].values[0]]

    selected_instructor_comments = list(filtered_data["comments"])
    st.write(filtered_data)
    st.subheader(f"Evaluated by: {len(selected_instructor_comments)} students")

    # Maps subj_inst ids to subject names; the matching fetchall() is a
    # few statements further down.
    cursor.execute("""
        SELECT subj_inst.subj_inst_id, subject.sub_name
        FROM subj_inst
        INNER JOIN subject
        ON subj_inst.sub_id_code = subject.sub_id_code
    """)

    # Assuming you have a DataFrame named 'filtered_data'
    # and column_names is a list of column names you want to consider for calculating average scores

    # Convert all columns to numeric data
    filtered_data[column_names] = filtered_data[column_names].apply(pd.to_numeric, errors='coerce')

    # Fetch subject data from the cursor
    subject_data = cursor.fetchall()

    # Create a DataFrame for subject data
    subject_df = pd.DataFrame(subject_data, columns=["subj_inst_id", "sub name"])

    # Merge subject data with filtered data based on 'subj_inst_id'
    filtered_data = filtered_data.merge(subject_df, on="subj_inst_id", how="left")

    # Group by subject name and calculate average scores
    subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()

    # Calculate total average and add it as a new column
    subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)

    # NOTE(review): the two statements below recompute exactly the same
    # groupby/mean and total-average as the two above -- redundant.
    subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()
    subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)

    # Grouped bar chart: one bar per criteria, grouped by subject.
    fig = go.Figure()

    for criterion, label in zip(column_names, criteria_labels):
        fig.add_trace(go.Bar(
            x=subject_avg_scores["sub name"],
            y=subject_avg_scores[criterion],
            name=label,
        ))

    # Add the total average score above the bars
    total_average = subject_avg_scores["total average"].mean()
    fig.add_trace(go.Scatter(
        x=subject_avg_scores["sub name"],
        y=subject_avg_scores["total average"],
        mode="markers+text",
        text=round(subject_avg_scores["total average"], 2),
        textposition="top center",
        textfont=dict(size=14),
        marker=dict(size=10, color="black"),
        name="Total Average",
    ))

    fig.update_layout(
        width=1000,height=600,
        barmode="group",
        title=f"Average Scores per Criteria by Subject for Instructor: {selected_instructor}",
        xaxis_title=f"Overall Average Score (All Subjects): {total_average:.2f}",
        yaxis_title="Average Score",
    )
    st.plotly_chart(fig)

    # Text block summarising per-criteria averages, fed to the LLM below.
    results_to_prompt = "Average score per Criteria\n"
    criteria_averages = [(criteria.replace("_", " "), round(filtered_data[criteria].mean(), 2)) for criteria in column_names]
    for criteria, average_score in criteria_averages:
        results_to_prompt += f"{criteria}: {average_score}/5, \n"

    # Bar chart of the per-criteria averages (all subjects combined).
    fig = go.Figure()
    fig.add_trace(go.Bar(
        x=[criteria for criteria, _ in criteria_averages],
        y=[average_score for _, average_score in criteria_averages],
        text=[f"{average_score}/5" for _, average_score in criteria_averages],
    ))

    fig.update_layout(
        width=1000,
        title="Average Score per Criteria",
        xaxis_title="Criteria",
        yaxis_title="Average Score",
    )

    st.plotly_chart(fig)
    results_to_prompt = f"""
    Based from these over-all average score please Analyze it and provide short insights: {str(results_to_prompt)}.
    Make it in sentence type and in English language only.

    """
    # Retry until the LLM call succeeds.
    # NOTE(review): errors are swallowed silently; a persistent failure
    # inside g4f_prompt makes this loop spin forever.
    while True:
        try:
            with st.spinner("Analyzing... "):
                # st.write(llm_chain.run(prompt))
                st.write(g4f_prompt(results_to_prompt)) #############################
                st.success("Analyzing Complete!")
            break

        except Exception as e:
            # Handle the error (e.g., log it or take appropriate action)
            # Sleep for a moment before retrying
            # st.write("Error occurred.. Retrying")
            pass
            # time.sleep(0.4)
    # Add pie graph of evaluation distribution per student's section
    # Fetch program options from the program table
    # NOTE(review): ids are interpolated straight into the SQL text; this
    # produces invalid SQL when there is exactly one stud_id (Python's
    # trailing-comma tuple repr) and bypasses parameterization.
    cursor.execute(f"""
        SELECT
            pr.prog_code || '-' || c.class_year || '-' || c.class_section AS merged_result,
            COUNT(*) AS occurrence_count
        FROM
            student s
        JOIN
            class c ON s.class_id = c.class_id
        JOIN
            program pr ON c.prog_id = pr.prog_id
        WHERE
            s.stud_id IN {tuple(list(filtered_data["stud_id"]))}
        GROUP BY
            s.class_id, pr.prog_code, c.class_year, c.class_section;

    """)

    merged_result = pd.DataFrame(cursor.fetchall(), columns=["merged_result", "occurrence_count"])
    st.write(filtered_data)
    st.write(merged_result)
    # section_counts = filtered_data["stud_id"].value_counts()
    # st.write(section_counts)

    # Pie chart: share of responses contributed by each class/section.
    fig = px.pie(
        merged_result,
        values="occurrence_count",
        names="merged_result",
        title="Evaluation Distribution per Student's Section",
    )

    # Add percentage and occurrence_count to the hover information
    fig.update_traces(
        hovertemplate="%{label}: %{percent} <br>Occurrence Count: %{value}",
        textinfo="percent+value",
    )

    fig.update_layout(
        width=600,
        height=600,
        font=dict(size=20),
    )
    st.plotly_chart(fig)

    # Per-class averages and respondent-weighted averages over the ten
    # criteria columns, computed in SQL.
    cursor.execute(f"""
        SELECT
            s.class_id,
            pr.prog_code || '-' || c.class_year || '-' || c.class_section AS class_info,
            COUNT(DISTINCT s.stud_id) AS num_respondents,
            ROUND((AVG(Teaching_Effectiveness) + AVG(Course_Organization) + AVG(Accessibility_and_Communication) +
                   AVG(Assessment_and_Grading) + AVG(Respect_and_Inclusivity) + AVG(Engagement_and_Interactivity) +
                   AVG(Feedback_and_Improvement) + AVG(Accessibility_of_Learning_Resources) +
                   AVG(Passion_and_Enthusiasm) + AVG(Professionalism_and_Ethical_Conduct)) / 10, 2) AS avg_overall,
            ROUND((COUNT(DISTINCT s.stud_id) * (AVG(Teaching_Effectiveness) + AVG(Course_Organization) + AVG(Accessibility_and_Communication) +
                   AVG(Assessment_and_Grading) + AVG(Respect_and_Inclusivity) + AVG(Engagement_and_Interactivity) +
                   AVG(Feedback_and_Improvement) + AVG(Accessibility_of_Learning_Resources) +
                   AVG(Passion_and_Enthusiasm) + AVG(Professionalism_and_Ethical_Conduct)) / 10), 2) AS weighted_avg_overall
        FROM
            evaluation e
        JOIN
            student s ON e.stud_id = s.stud_id
        JOIN
            class c ON s.class_id = c.class_id
        JOIN
            program pr ON c.prog_id = pr.prog_id
        WHERE
            s.stud_id IN {tuple(list(filtered_data["stud_id"]))}
        GROUP BY
            s.class_id, pr.prog_code, c.class_year, c.class_section, class_info;
    """)

    avg_scores_per_class = pd.DataFrame(cursor.fetchall(), columns=[
        "class_id",
        "class_info",
        "num_respondents",
        "avg_overall",
        "weighted_avg_overall"
    ])

    # Calculate the last row's weighted_avg_overall / num_respondents
    last_row_index = avg_scores_per_class["weighted_avg_overall"].last_valid_index()
    if last_row_index is not None:
        avg_scores_per_class.at[last_row_index, "weighted_avg_overall"] /= avg_scores_per_class.at[last_row_index, "num_respondents"]

    # Convert the column to decimal.Decimal before rounding
    # NOTE(review): this overwrites the SQL-computed (and just-adjusted)
    # weighted averages with num_respondents * avg_overall -- the
    # adjustment above therefore has no lasting effect; confirm intent.
    avg_scores_per_class["weighted_avg_overall"] = avg_scores_per_class["num_respondents"] * avg_scores_per_class["avg_overall"] # avg_scores_per_class["weighted_avg_overall"].apply(lambda x: round(float(x), 2))

    # Drop rows with None values
    avg_scores_per_class = avg_scores_per_class.dropna()

    # Calculate the overall averages for avg_overall and weighted_avg_overall
    num_respondents = round(avg_scores_per_class["num_respondents"].sum(), 2)
    overall_avg_overall = round(avg_scores_per_class["avg_overall"].mean(), 2)
    overall_weighted_avg_overall = round(avg_scores_per_class["weighted_avg_overall"].sum(),2)
    weighted_avg_overall = round(overall_weighted_avg_overall / num_respondents,2)

    # # Append an additional row for avg_overall and weighted_avg_overall
    # avg_scores_per_class = avg_scores_per_class.append({
    #     "class_id": int(avg_scores_per_class["class_id"].max()) + 1,
    #     "class_info": "Total",
    #     "num_respondents": avg_scores_per_class["num_respondents"].sum(),
    #     "avg_overall": round(overall_avg_overall, 2),
    #     "weighted_avg_overall": round(overall_weighted_avg_overall / avg_scores_per_class["num_respondents"].sum(), 2)
    # }, ignore_index=True)

    # st.write(avg_scores_per_class.style.set_properties(**{'text-align': 'center'}))

    # Add summary rows to the DataFrame
    # NOTE(review): DataFrame.append was removed in pandas 2.0; this code
    # requires pandas < 2 (switch to pd.concat when upgrading).
    avg_scores_per_class = avg_scores_per_class.append({
        "class_id": "",
        "class_info": "Summary",
        "num_respondents": num_respondents,
        "avg_overall": " ",
        "weighted_avg_overall": overall_weighted_avg_overall
    }, ignore_index=True)

    def calculate_satisfaction(weighted_avg_overall):
        """Map a 0-5 weighted average onto a qualitative rating band."""
        if weighted_avg_overall > 4:
            return "Outstanding"
        elif weighted_avg_overall > 3:
            return "Above Average"
        elif weighted_avg_overall > 2:
            return "Average"
        elif weighted_avg_overall > 1:
            return "Below Average"
        else:
            return "Unsatisfactory"

    def highlight_cell(col, col_label, row_label):
        """Styler column function: highlight the one cell at (row_label, col_label)."""
        # check if col is a column we want to highlight
        if col.name == col_label:
            # a boolean mask where True represents a row we want to highlight
            mask = (col.index == row_label)
            # return an array of string styles (e.g. ["", "background-color: yellow"])
            return ["background-color: lightgreen" if val_bool else "" for val_bool in mask]
        else:
            # return an array of empty strings that has the same size as col (e.g. ["",""])
            return np.full_like(col, "", dtype="str")

    avg_scores_per_class = avg_scores_per_class.append({
        "class_id": "",
        "class_info": "Weighted Avg.",
        "num_respondents": " ",  # You can set this to "N/A" or any appropriate value
        "avg_overall": calculate_satisfaction(weighted_avg_overall),  # You can set this to "N/A" or any appropriate value
        "weighted_avg_overall": weighted_avg_overall
    }, ignore_index=True)

    # st.dataframe(avg_scores_per_class.style.background_gradient(subset=["C"], cmap="RdYlGn", vmin=0, vmax=2.5))
    # Highlight the satisfaction cell; row_label=9 assumes the
    # "Weighted Avg." row always lands at index 9 -- TODO confirm this
    # holds for every class count.
    avg_scores_per_class =avg_scores_per_class.style.apply(highlight_cell, col_label="avg_overall", row_label=9)

    st.write(avg_scores_per_class)
    st.write(f"Number of respondents: {num_respondents}")
    st.write(f"Overall weighted avg.: {overall_weighted_avg_overall}")
    st.write(f"Weighted avg overall: {weighted_avg_overall}")

    # if st.button("Analyze the results", key="analyze_results"):

    # One expander per subject: grouped bar chart of criteria averages
    # plus an LLM-generated short insight.
    for subject in subject_avg_scores["sub name"]:
        with st.expander(subject):
            subject_filtered_data = filtered_data[filtered_data["sub name"] == subject]
            promt_txt = ""
            fig = go.Figure()

            # st.write(subject_filtered_data) # displays DF for every graphs
            for criterion, label in zip(column_names, criteria_labels):
                text = round(subject_filtered_data[criterion].mean(),2)
                fig.add_trace(go.Bar(
                    x=[label],
                    y=[text],
                    text=text,
                    name=label,
                ))
                promt_txt += criterion.replace("_", " ") + ": " + str(text)+ "\n"
                # st.text(promt_txt) # prompt per graph

            total_average = subject_filtered_data[column_names].mean(axis=1).mean()

            total_average_txt = f"{subject} Average Score: {round(total_average,2)}/5"
            fig.update_layout(
                barmode="group",
                width=1000,
                title=total_average_txt,
                yaxis_title="Average Score",
            )
            st.plotly_chart(fig)

            prompt = f"generate a very short insights about this faculty evaluation result for the subject {subject}?\n{promt_txt}\nplease strictly shorten your response in sentence format"
            # st.text(prompt)
            # Retry until the LLM call succeeds; exceptions are swallowed,
            # so a persistent failure loops forever (see NOTE above).
            while True:
                with st.spinner("Generating Recommendation"):
                    try:
                        st.write(g4f_prompt(prompt)) #############################
                        # pass
                        # break
                        break
                    except Exception as e:
                        # Handle the error (e.g., log it or take appropriate action)
                        # Sleep for a moment before retrying
                        # st.write("Error occurred.. Retrying")
                        pass
                        # time.sleep(0.4)

    return selected_instructor, selected_instructor_comments, results_to_prompt
|
| 396 |
+
|
| 397 |
+
def evaluation():
    """Page entry point: load evaluation rows and render the dashboard.

    Any exception raised while fetching or analyzing is surfaced to the
    user via ``st.error`` instead of propagating.
    """
    try:
        evaluation_df = fetch_evaluation_data()
        if evaluation_df is None:
            return
        # (instructor, comments, criteria summary) from the dashboard.
        analysis = analyze_instructors(evaluation_df)
        # if st.button("Analyze comments"):
        #     eval_analysis(analysis[0], analysis[1], analysis[2])
        with st.expander("Sentiment Analysis"):
            instructor, comments, criteria_summary = analysis
            eval_analysis(instructor, comments, criteria_summary)
    except Exception as e:
        st.error(f"An error occurred: {str(e)}")
|
app5_selectbox/evaluation_analysis copy 2.py
ADDED
|
@@ -0,0 +1,378 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gspread
|
| 2 |
+
import pandas as pd
|
| 3 |
+
from oauth2client.service_account import ServiceAccountCredentials
|
| 4 |
+
from transformers import BertForSequenceClassification, BertTokenizer
|
| 5 |
+
import torch
|
| 6 |
+
import streamlit as st
|
| 7 |
+
from matplotlib import pyplot as plt
|
| 8 |
+
import numpy as np
|
| 9 |
+
from wordcloud import WordCloud
|
| 10 |
+
from PIL import ImageFont
|
| 11 |
+
# from app5_selectbox.langchain_llama_gpu import llm_chain
|
| 12 |
+
from app5_selectbox.g4f_prompt import g4f_prompt
|
| 13 |
+
|
| 14 |
+
# # Load the model and tokenizer
|
| 15 |
+
# model = BertForSequenceClassification.from_pretrained("./sentiment_model")
|
| 16 |
+
# tokenizer = BertTokenizer.from_pretrained("./sentiment_model")
|
| 17 |
+
|
| 18 |
+
def eval_analysis(Instructor, Instructor_comment, criteria_results):
|
| 19 |
+
# # Authenticate with Google Sheets API
|
| 20 |
+
# scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
|
| 21 |
+
# creds = ServiceAccountCredentials.from_json_keyfile_name('dataset-401003-7325e98039a4.json', scope)
|
| 22 |
+
# client = gspread.authorize(creds)
|
| 23 |
+
|
| 24 |
+
# # Open the spreadsheet by its title
|
| 25 |
+
# spreadsheet = client.open('survey (Responses)')
|
| 26 |
+
|
| 27 |
+
# # Select a specific worksheet
|
| 28 |
+
# worksheet = spreadsheet.worksheet('Form Responses 1')
|
| 29 |
+
|
| 30 |
+
# # Read data from the worksheet
|
| 31 |
+
# data = worksheet.get_all_values()
|
| 32 |
+
|
| 33 |
+
# # Create a Pandas DataFrame from the data
|
| 34 |
+
# df = pd.DataFrame(data[1:], columns=data[0]) # Assuming the first row contains column headers
|
| 35 |
+
# df = df.iloc[:, [1, 2]] # Filter columns
|
| 36 |
+
|
| 37 |
+
# #
|
| 38 |
+
# instructor_list = df.iloc[:, 0].unique()
|
| 39 |
+
# instructor_list = sorted(instructor_list)
|
| 40 |
+
# # print(instructor_list)
|
| 41 |
+
|
| 42 |
+
# # Create a dropdown widget in the sidebar
|
| 43 |
+
# option = st.sidebar.selectbox("Select an option", instructor_list)
|
| 44 |
+
|
| 45 |
+
# # Filter rows containing "Instructor 1"
|
| 46 |
+
# Instructor = df[df['Instructor'] == option]
|
| 47 |
+
# Instructor_comment = Instructor['comment'].tolist()
|
| 48 |
+
# ##################################################### BERT MODEL
|
| 49 |
+
# def perform_sentiment_analysis(text):
|
| 50 |
+
# inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
|
| 51 |
+
# with torch.no_grad():
|
| 52 |
+
# outputs = model(**inputs)
|
| 53 |
+
# logits = outputs.logits
|
| 54 |
+
# predicted_class = torch.argmax(logits, dim=1).item()
|
| 55 |
+
# sentiment_labels = ["negative", "neutral", "positive"]
|
| 56 |
+
# sentiment = sentiment_labels[predicted_class]
|
| 57 |
+
# return sentiment
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# from transformers import BertForSequenceClassification, BertTokenizer
|
| 61 |
+
|
| 62 |
+
# # Load the model and tokenizer
|
| 63 |
+
# model = BertForSequenceClassification.from_pretrained("./sentiment_model")
|
| 64 |
+
# tokenizer = BertTokenizer.from_pretrained("./sentiment_model")
|
| 65 |
+
|
| 66 |
+
# # sample_texts_tfidf = vectorizer.transform(sample_texts)
|
| 67 |
+
# # sample_predictions = classifier.predict(sample_texts_tfidf)
|
| 68 |
+
|
| 69 |
+
# sample_predictions = []
|
| 70 |
+
|
| 71 |
+
# # Initialize counters for sentiment classes
|
| 72 |
+
# negative_count = 0
|
| 73 |
+
# neutral_count = 0
|
| 74 |
+
# positive_count = 0
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# for text in Instructor_comment:
|
| 78 |
+
# predicted_class = perform_sentiment_analysis(text)
|
| 79 |
+
# print(f"Text: {text}")
|
| 80 |
+
# print(f"Predicted Sentiment: {predicted_class}")
|
| 81 |
+
# sample_predictions.append(predicted_class)
|
| 82 |
+
# if predicted_class == "negative":
|
| 83 |
+
# negative_count += 1
|
| 84 |
+
# elif predicted_class == "neutral":
|
| 85 |
+
# neutral_count += 1
|
| 86 |
+
# else:
|
| 87 |
+
# positive_count += 1
|
| 88 |
+
|
| 89 |
+
# print(f'negative_count {negative_count}')
|
| 90 |
+
# print(f'neutral_count {neutral_count}')
|
| 91 |
+
# print(f'positive_count {positive_count}')
|
| 92 |
+
|
| 93 |
+
################################################### scikit learn model
|
| 94 |
+
|
| 95 |
+
# import joblib
|
| 96 |
+
# # Load the model and vectorizer for predictions
|
| 97 |
+
# loaded_model, loaded_vectorizer = joblib.load("MultinomialNB_Sentiment.pkl")
|
| 98 |
+
|
| 99 |
+
# # Transform the new text data using the loaded vectorizer
|
| 100 |
+
# new_text_features = loaded_vectorizer.transform(Instructor_comment)
|
| 101 |
+
|
| 102 |
+
# # Make predictions using the loaded model
|
| 103 |
+
# predicted_class = loaded_model.predict(new_text_features)
|
| 104 |
+
# # print(f"Predicted class: {predicted_class}")
|
| 105 |
+
|
| 106 |
+
# sample_predictions = []
|
| 107 |
+
|
| 108 |
+
# # Initialize counters for sentiment classes
|
| 109 |
+
# negative_count = 0
|
| 110 |
+
# neutral_count = 0
|
| 111 |
+
# positive_count = 0
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# for text, prediction in zip(Instructor_comment, predicted_class):
|
| 115 |
+
# print(f"Text: {text}")
|
| 116 |
+
# print(f"Predicted Sentiment: {prediction}")
|
| 117 |
+
# sample_predictions.append(prediction)
|
| 118 |
+
# if prediction == "negative":
|
| 119 |
+
# negative_count += 1
|
| 120 |
+
# elif prediction == "neutral":
|
| 121 |
+
# neutral_count += 1
|
| 122 |
+
# else:
|
| 123 |
+
# positive_count += 1
|
| 124 |
+
|
| 125 |
+
# print(f'negative_count {negative_count}')
|
| 126 |
+
# print(f'neutral_count {neutral_count}')
|
| 127 |
+
# print(f'positive_count {positive_count}')
|
| 128 |
+
|
| 129 |
+
################################################### bert2 model
|
| 130 |
+
import torch
|
| 131 |
+
from transformers import BertTokenizer, BertForSequenceClassification
|
| 132 |
+
import numpy as np
|
| 133 |
+
|
| 134 |
+
# Load the saved model
|
| 135 |
+
loaded_model = BertForSequenceClassification.from_pretrained('sentiment_model')
|
| 136 |
+
tokenizerr = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
# Encode the sample comments
|
| 140 |
+
sample_encodings = tokenizerr(list(Instructor_comment), truncation=True, padding=True, max_length=128, return_tensors='pt')
|
| 141 |
+
|
| 142 |
+
# Make predictions on the sample comments
|
| 143 |
+
sample_input_ids = sample_encodings['input_ids']
|
| 144 |
+
sample_attention_mask = sample_encodings['attention_mask']
|
| 145 |
+
|
| 146 |
+
with torch.no_grad():
|
| 147 |
+
sample_outputs = loaded_model(sample_input_ids, attention_mask=sample_attention_mask)
|
| 148 |
+
|
| 149 |
+
# Get predicted labels
|
| 150 |
+
sample_logits = sample_outputs.logits
|
| 151 |
+
sample_predictions = np.argmax(sample_logits, axis=1)
|
| 152 |
+
|
| 153 |
+
# Map predicted labels back to sentiment labels
|
| 154 |
+
sentiment_labels = ['negative', 'positive']
|
| 155 |
+
predicted_sentiments = [sentiment_labels[label] for label in sample_predictions]
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
# # Print the comments and predicted sentiments
|
| 159 |
+
# for comment, sentiment in zip(Instructor_comment, predicted_sentiments):
|
| 160 |
+
# print(f"Comment: {comment}")
|
| 161 |
+
# print(f"Predicted Sentiment: {sentiment}")
|
| 162 |
+
# print()
|
| 163 |
+
|
| 164 |
+
sample_predictions = []
|
| 165 |
+
|
| 166 |
+
# Initialize counters for sentiment classes
|
| 167 |
+
negative_count = 0
|
| 168 |
+
neutral_count = 0
|
| 169 |
+
positive_count = 0
|
| 170 |
+
|
| 171 |
+
# print(predicted_sentiments)
|
| 172 |
+
# print(Instructor_comment)
|
| 173 |
+
|
| 174 |
+
for text, prediction in zip(Instructor_comment, predicted_sentiments):
|
| 175 |
+
print(f"Text: {text}")
|
| 176 |
+
print(f"Predicted Sentiment: {prediction}")
|
| 177 |
+
sample_predictions.append(prediction)
|
| 178 |
+
if prediction == "negative":
|
| 179 |
+
negative_count += 1
|
| 180 |
+
elif prediction == "neutral":
|
| 181 |
+
neutral_count += 1
|
| 182 |
+
else:
|
| 183 |
+
positive_count += 1
|
| 184 |
+
|
| 185 |
+
print(f'negative_count {negative_count}')
|
| 186 |
+
print(f'neutral_count {neutral_count}')
|
| 187 |
+
print(f'positive_count {positive_count}')
|
| 188 |
+
|
| 189 |
+
###################################################
|
| 190 |
+
|
| 191 |
+
# Create a Streamlit app
|
| 192 |
+
st.title("Sentiment Analysis Dashboard")
|
| 193 |
+
st.sidebar.header("Settings")
|
| 194 |
+
|
| 195 |
+
link_text = "Instructor Survey"
|
| 196 |
+
link_url = "https://forms.gle/64n9CXMDRP2NYgZYA"
|
| 197 |
+
st.sidebar.markdown(f"[{link_text}]({link_url})")
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
# Display sentiment counts
|
| 201 |
+
st.write("### Sentiment Counts")
|
| 202 |
+
st.write(f"Negative: {negative_count}")
|
| 203 |
+
# st.write(f"Neutral: {neutral_count}")
|
| 204 |
+
st.write(f"Positive: {positive_count}")
|
| 205 |
+
|
| 206 |
+
# Plot sentiment distribution
|
| 207 |
+
sentiment_counts = pd.Series(np.array(sample_predictions)).value_counts()
|
| 208 |
+
desired_order = ['positive',
|
| 209 |
+
# 'neutral',
|
| 210 |
+
'negative']
|
| 211 |
+
sentiment_counts = sentiment_counts.reindex(desired_order, fill_value=0)
|
| 212 |
+
percentage_distribution = sentiment_counts / len(sample_predictions) * 100
|
| 213 |
+
|
| 214 |
+
st.write("### Sentiment Distribution")
|
| 215 |
+
fig, ax = plt.subplots(figsize=(8, 6))
|
| 216 |
+
bars = plt.bar(percentage_distribution.index, sentiment_counts.values, color=['green', 'orange', 'red'])
|
| 217 |
+
plt.xlabel('Sentiment')
|
| 218 |
+
plt.ylabel('Count')
|
| 219 |
+
plt.title('Sentiment Distribution in Sample Predictions')
|
| 220 |
+
plt.xticks(rotation=45)
|
| 221 |
+
for bar, percentage, des_order in zip(bars, percentage_distribution, desired_order):
|
| 222 |
+
height = bar.get_height()
|
| 223 |
+
ax.text(bar.get_x() + bar.get_width() / 2, height, f'{percentage:.2f}% {des_order.upper()}', ha='center', va='bottom')
|
| 224 |
+
st.pyplot(fig)
|
| 225 |
+
|
| 226 |
+
st.set_option('deprecation.showPyplotGlobalUse', False)
|
| 227 |
+
|
| 228 |
+
# Generate word clouds based on sentiment categories
|
| 229 |
+
sentiment_texts = {
|
| 230 |
+
'positive': [],
|
| 231 |
+
# 'neutral': [],
|
| 232 |
+
'negative': []
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
for text, sentiment in zip(Instructor_comment, sample_predictions):
|
| 236 |
+
sentiment_texts[sentiment].append(text)
|
| 237 |
+
|
| 238 |
+
text_for_llama = ""
|
| 239 |
+
|
| 240 |
+
for sentiment, texts in sentiment_texts.items():
|
| 241 |
+
combined_texts = ' '.join(texts)
|
| 242 |
+
combined_texts = combined_texts.split()
|
| 243 |
+
filtered_words = [word for word in combined_texts if len(word) > 2]
|
| 244 |
+
combined_texts = ' '.join(filtered_words)
|
| 245 |
+
if combined_texts =="": continue
|
| 246 |
+
# Load your custom TrueType font using PIL
|
| 247 |
+
font_path = "QuartzoBold-W9lv.ttf" # Replace with the path to your TTF font file
|
| 248 |
+
# custom_font = ImageFont.truetyp e(font_path) # Adjust the font size as needed
|
| 249 |
+
# Set the font family to use the TrueType font
|
| 250 |
+
# font = ImageFont.truetype(font_path)
|
| 251 |
+
|
| 252 |
+
wordcloud = WordCloud(font_path=font_path,width=800, height=600, background_color='white', max_words=15).generate(combined_texts)
|
| 253 |
+
st.write(f"### Word Cloud for {sentiment} Sentiment")
|
| 254 |
+
plt.figure(figsize=(10, 6))
|
| 255 |
+
plt.imshow(wordcloud, interpolation='bilinear')
|
| 256 |
+
plt.axis('off')
|
| 257 |
+
st.pyplot()
|
| 258 |
+
|
| 259 |
+
if sentiment == "negative":
|
| 260 |
+
# Extract the text from the word cloud object
|
| 261 |
+
generated_text = wordcloud.words_
|
| 262 |
+
|
| 263 |
+
# Print the generated text
|
| 264 |
+
for word, frequency in generated_text.items():
|
| 265 |
+
# print(f"{word}: {frequency}")
|
| 266 |
+
text_for_llama += str(word)+" "
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
# Generate a word cloud from all the text data
|
| 270 |
+
all_text = ' '.join(Instructor_comment)
|
| 271 |
+
all_text = all_text.split()
|
| 272 |
+
filtered_words = [word for word in all_text if len(word) > 3]
|
| 273 |
+
all_text = ' '.join(filtered_words)
|
| 274 |
+
|
| 275 |
+
st.write("### Word Cloud for All Sentiments")
|
| 276 |
+
wordcloud = WordCloud(font_path=font_path, width=800, height=800, background_color='white', max_words=200).generate(all_text)
|
| 277 |
+
plt.figure(figsize=(8, 8), facecolor=None)
|
| 278 |
+
plt.imshow(wordcloud)
|
| 279 |
+
plt.axis("off")
|
| 280 |
+
st.pyplot()
|
| 281 |
+
|
| 282 |
+
neg_comments = []
|
| 283 |
+
pos_comments = []
|
| 284 |
+
# Print the comments and predicted sentiments
|
| 285 |
+
for comment, sentiment in zip(Instructor_comment, predicted_sentiments):
|
| 286 |
+
if sentiment == "positive": pos_comments.append(comment)
|
| 287 |
+
else: neg_comments.append(comment)
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
if text_for_llama == "":
|
| 291 |
+
st.title("Expressing Gratitude and Dedication")
|
| 292 |
+
text_for_llama = f"""
|
| 293 |
+
There's no negative feedback/comments to the instructor, give him/her short email to say.
|
| 294 |
+
[Your Name] = The Management
|
| 295 |
+
[Instructor's Name] = {Instructor}
|
| 296 |
+
"""
|
| 297 |
+
else:
|
| 298 |
+
st.title('Recommendation:')
|
| 299 |
+
text_for_llama = text_for_llama.split()
|
| 300 |
+
text_for_llama = ", ".join(text_for_llama)
|
| 301 |
+
text_for_llama = f"""
|
| 302 |
+
Based from these students' feedback: {str(text_for_llama)}. \n
|
| 303 |
+
Please generate a short email to teh instructor having 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only.
|
| 304 |
+
define the best email subject based from the recomendation
|
| 305 |
+
[Your Name] = The Management
|
| 306 |
+
[Instructor's Name] = {Instructor}
|
| 307 |
+
|
| 308 |
+
"""
|
| 309 |
+
|
| 310 |
+
# text_for_llama = f"""
|
| 311 |
+
# Based from these students' feedback: {str(text_for_llama)}. \n
|
| 312 |
+
# Please generate a short 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only.
|
| 313 |
+
|
| 314 |
+
# """
|
| 315 |
+
|
| 316 |
+
# text_for_llama = f"""
|
| 317 |
+
# Based from these students' feedback: {str(text_for_llama)}. \n
|
| 318 |
+
# and Overall score per criteria results: {str(criteria_results)}. \n
|
| 319 |
+
# Please generate a short 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only.
|
| 320 |
+
# """
|
| 321 |
+
# Then give insights about the evaluation report based from different criteria.
|
| 322 |
+
# Here is the results: {criteria_results}
|
| 323 |
+
# Your response format-
|
| 324 |
+
# Recommendation to Instructor:
|
| 325 |
+
# Insights on Evaluation Report:
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
prompt = text_for_llama
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
# # ================================================ replicate.com
|
| 334 |
+
# CUDA_LAUNCH_BLOCKING=1
|
| 335 |
+
# import replicate
|
| 336 |
+
# replicate = replicate.Client(api_token='r8_M9Dx8VYKkuTcw1o39d4Yw0HtpWFt4k239ebvW')
|
| 337 |
+
# output = replicate.run(
|
| 338 |
+
# # "meta/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
|
| 339 |
+
# "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
|
| 340 |
+
# input={"prompt": prompt}
|
| 341 |
+
# )
|
| 342 |
+
# st.write(output)
|
| 343 |
+
# # The meta/llama-2-70b-chat model can stream output as it's running.
|
| 344 |
+
# # The predict method returns an iterator, and you can iterate over that output.
|
| 345 |
+
# # ================================================
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
# # st.title('Recommendation:')
|
| 349 |
+
# # llama_output = ""
|
| 350 |
+
# # with st.spinner("Generating Recommendation"):
|
| 351 |
+
# # loading_text = st.empty()
|
| 352 |
+
# # for item in reponse(prompt):
|
| 353 |
+
# # llama_output +=item
|
| 354 |
+
# # loading_text.write(llama_output)
|
| 355 |
+
# # st.success("Generation Complete!")
|
| 356 |
+
|
| 357 |
+
# # ================================================ local llama llm_chain
|
| 358 |
+
while True:
|
| 359 |
+
try:
|
| 360 |
+
with st.spinner("Generating...."):
|
| 361 |
+
# st.write(llm_chain.run(prompt))
|
| 362 |
+
# st.write(g4f_prompt(prompt)) #################
|
| 363 |
+
st.success("Generation Complete!")
|
| 364 |
+
break
|
| 365 |
+
|
| 366 |
+
except Exception as e:
|
| 367 |
+
# Handle the error (e.g., log it or take appropriate action)
|
| 368 |
+
# Sleep for a moment before retrying
|
| 369 |
+
# st.write("Error occurred.. Retrying")
|
| 370 |
+
pass
|
| 371 |
+
# time.sleep(0.4)
|
| 372 |
+
# # ================================================
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
|
app5_selectbox/evaluation_analysis copy.py
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gspread
|
| 2 |
+
import pandas as pd
|
| 3 |
+
from oauth2client.service_account import ServiceAccountCredentials
|
| 4 |
+
from transformers import BertForSequenceClassification, BertTokenizer
|
| 5 |
+
import torch
|
| 6 |
+
import streamlit as st
|
| 7 |
+
from matplotlib import pyplot as plt
|
| 8 |
+
import numpy as np
|
| 9 |
+
from wordcloud import WordCloud
|
| 10 |
+
from PIL import ImageFont
|
| 11 |
+
from app5_selectbox.langchain_llama_gpu import llm_chain
|
| 12 |
+
|
| 13 |
+
# # Load the model and tokenizer
|
| 14 |
+
# model = BertForSequenceClassification.from_pretrained("./sentiment_model")
|
| 15 |
+
# tokenizer = BertTokenizer.from_pretrained("./sentiment_model")
|
| 16 |
+
|
| 17 |
+
def eval_analysis(Instructor, Instructor_comment, criteria_results):
|
| 18 |
+
# # Authenticate with Google Sheets API
|
| 19 |
+
# scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
|
| 20 |
+
# creds = ServiceAccountCredentials.from_json_keyfile_name('dataset-401003-7325e98039a4.json', scope)
|
| 21 |
+
# client = gspread.authorize(creds)
|
| 22 |
+
|
| 23 |
+
# # Open the spreadsheet by its title
|
| 24 |
+
# spreadsheet = client.open('survey (Responses)')
|
| 25 |
+
|
| 26 |
+
# # Select a specific worksheet
|
| 27 |
+
# worksheet = spreadsheet.worksheet('Form Responses 1')
|
| 28 |
+
|
| 29 |
+
# # Read data from the worksheet
|
| 30 |
+
# data = worksheet.get_all_values()
|
| 31 |
+
|
| 32 |
+
# # Create a Pandas DataFrame from the data
|
| 33 |
+
# df = pd.DataFrame(data[1:], columns=data[0]) # Assuming the first row contains column headers
|
| 34 |
+
# df = df.iloc[:, [1, 2]] # Filter columns
|
| 35 |
+
|
| 36 |
+
# #
|
| 37 |
+
# instructor_list = df.iloc[:, 0].unique()
|
| 38 |
+
# instructor_list = sorted(instructor_list)
|
| 39 |
+
# # print(instructor_list)
|
| 40 |
+
|
| 41 |
+
# # Create a dropdown widget in the sidebar
|
| 42 |
+
# option = st.sidebar.selectbox("Select an option", instructor_list)
|
| 43 |
+
|
| 44 |
+
# # Filter rows containing "Instructor 1"
|
| 45 |
+
# Instructor = df[df['Instructor'] == option]
|
| 46 |
+
# Instructor_comment = Instructor['comment'].tolist()
|
| 47 |
+
# ##################################################### BERT MODEL
|
| 48 |
+
# def perform_sentiment_analysis(text):
|
| 49 |
+
# inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
|
| 50 |
+
# with torch.no_grad():
|
| 51 |
+
# outputs = model(**inputs)
|
| 52 |
+
# logits = outputs.logits
|
| 53 |
+
# predicted_class = torch.argmax(logits, dim=1).item()
|
| 54 |
+
# sentiment_labels = ["negative", "neutral", "positive"]
|
| 55 |
+
# sentiment = sentiment_labels[predicted_class]
|
| 56 |
+
# return sentiment
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# from transformers import BertForSequenceClassification, BertTokenizer
|
| 60 |
+
|
| 61 |
+
# # Load the model and tokenizer
|
| 62 |
+
# model = BertForSequenceClassification.from_pretrained("./sentiment_model")
|
| 63 |
+
# tokenizer = BertTokenizer.from_pretrained("./sentiment_model")
|
| 64 |
+
|
| 65 |
+
# # sample_texts_tfidf = vectorizer.transform(sample_texts)
|
| 66 |
+
# # sample_predictions = classifier.predict(sample_texts_tfidf)
|
| 67 |
+
|
| 68 |
+
# sample_predictions = []
|
| 69 |
+
|
| 70 |
+
# # Initialize counters for sentiment classes
|
| 71 |
+
# negative_count = 0
|
| 72 |
+
# neutral_count = 0
|
| 73 |
+
# positive_count = 0
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
# for text in Instructor_comment:
|
| 77 |
+
# predicted_class = perform_sentiment_analysis(text)
|
| 78 |
+
# print(f"Text: {text}")
|
| 79 |
+
# print(f"Predicted Sentiment: {predicted_class}")
|
| 80 |
+
# sample_predictions.append(predicted_class)
|
| 81 |
+
# if predicted_class == "negative":
|
| 82 |
+
# negative_count += 1
|
| 83 |
+
# elif predicted_class == "neutral":
|
| 84 |
+
# neutral_count += 1
|
| 85 |
+
# else:
|
| 86 |
+
# positive_count += 1
|
| 87 |
+
|
| 88 |
+
# print(f'negative_count {negative_count}')
|
| 89 |
+
# print(f'neutral_count {neutral_count}')
|
| 90 |
+
# print(f'positive_count {positive_count}')
|
| 91 |
+
|
| 92 |
+
################################################### scikit learn model
|
| 93 |
+
|
| 94 |
+
# import joblib
|
| 95 |
+
# # Load the model and vectorizer for predictions
|
| 96 |
+
# loaded_model, loaded_vectorizer = joblib.load("MultinomialNB_Sentiment.pkl")
|
| 97 |
+
|
| 98 |
+
# # Transform the new text data using the loaded vectorizer
|
| 99 |
+
# new_text_features = loaded_vectorizer.transform(Instructor_comment)
|
| 100 |
+
|
| 101 |
+
# # Make predictions using the loaded model
|
| 102 |
+
# predicted_class = loaded_model.predict(new_text_features)
|
| 103 |
+
# # print(f"Predicted class: {predicted_class}")
|
| 104 |
+
|
| 105 |
+
# sample_predictions = []
|
| 106 |
+
|
| 107 |
+
# # Initialize counters for sentiment classes
|
| 108 |
+
# negative_count = 0
|
| 109 |
+
# neutral_count = 0
|
| 110 |
+
# positive_count = 0
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
# for text, prediction in zip(Instructor_comment, predicted_class):
|
| 114 |
+
# print(f"Text: {text}")
|
| 115 |
+
# print(f"Predicted Sentiment: {prediction}")
|
| 116 |
+
# sample_predictions.append(prediction)
|
| 117 |
+
# if prediction == "negative":
|
| 118 |
+
# negative_count += 1
|
| 119 |
+
# elif prediction == "neutral":
|
| 120 |
+
# neutral_count += 1
|
| 121 |
+
# else:
|
| 122 |
+
# positive_count += 1
|
| 123 |
+
|
| 124 |
+
# print(f'negative_count {negative_count}')
|
| 125 |
+
# print(f'neutral_count {neutral_count}')
|
| 126 |
+
# print(f'positive_count {positive_count}')
|
| 127 |
+
|
| 128 |
+
################################################### bert2 model
|
| 129 |
+
import torch
|
| 130 |
+
from transformers import BertTokenizer, BertForSequenceClassification
|
| 131 |
+
import numpy as np
|
| 132 |
+
|
| 133 |
+
# Load the saved model
|
| 134 |
+
loaded_model = BertForSequenceClassification.from_pretrained('sentiment_model')
|
| 135 |
+
tokenizerr = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# Encode the sample comments
|
| 139 |
+
sample_encodings = tokenizerr(list(Instructor_comment), truncation=True, padding=True, max_length=128, return_tensors='pt')
|
| 140 |
+
|
| 141 |
+
# Make predictions on the sample comments
|
| 142 |
+
sample_input_ids = sample_encodings['input_ids']
|
| 143 |
+
sample_attention_mask = sample_encodings['attention_mask']
|
| 144 |
+
|
| 145 |
+
with torch.no_grad():
|
| 146 |
+
sample_outputs = loaded_model(sample_input_ids, attention_mask=sample_attention_mask)
|
| 147 |
+
|
| 148 |
+
# Get predicted labels
|
| 149 |
+
sample_logits = sample_outputs.logits
|
| 150 |
+
sample_predictions = np.argmax(sample_logits, axis=1)
|
| 151 |
+
|
| 152 |
+
# Map predicted labels back to sentiment labels
|
| 153 |
+
sentiment_labels = ['negative', 'positive']
|
| 154 |
+
predicted_sentiments = [sentiment_labels[label] for label in sample_predictions]
|
| 155 |
+
|
| 156 |
+
# # Print the comments and predicted sentiments
|
| 157 |
+
# for comment, sentiment in zip(Instructor_comment, predicted_sentiments):
|
| 158 |
+
# print(f"Comment: {comment}")
|
| 159 |
+
# print(f"Predicted Sentiment: {sentiment}")
|
| 160 |
+
# print()
|
| 161 |
+
|
| 162 |
+
sample_predictions = []
|
| 163 |
+
|
| 164 |
+
# Initialize counters for sentiment classes
|
| 165 |
+
negative_count = 0
|
| 166 |
+
neutral_count = 0
|
| 167 |
+
positive_count = 0
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
for text, prediction in zip(Instructor_comment, predicted_sentiments):
|
| 171 |
+
print(f"Text: {text}")
|
| 172 |
+
print(f"Predicted Sentiment: {prediction}")
|
| 173 |
+
sample_predictions.append(prediction)
|
| 174 |
+
if prediction == "negative":
|
| 175 |
+
negative_count += 1
|
| 176 |
+
elif prediction == "neutral":
|
| 177 |
+
neutral_count += 1
|
| 178 |
+
else:
|
| 179 |
+
positive_count += 1
|
| 180 |
+
|
| 181 |
+
print(f'negative_count {negative_count}')
|
| 182 |
+
print(f'neutral_count {neutral_count}')
|
| 183 |
+
print(f'positive_count {positive_count}')
|
| 184 |
+
|
| 185 |
+
###################################################
|
| 186 |
+
|
| 187 |
+
# Create a Streamlit app
|
| 188 |
+
st.title("Sentiment Analysis Dashboard")
|
| 189 |
+
st.sidebar.header("Settings")
|
| 190 |
+
|
| 191 |
+
link_text = "Instructor Survey"
|
| 192 |
+
link_url = "https://forms.gle/64n9CXMDRP2NYgZYA"
|
| 193 |
+
st.sidebar.markdown(f"[{link_text}]({link_url})")
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
# Display sentiment counts
|
| 197 |
+
st.write("### Sentiment Counts")
|
| 198 |
+
st.write(f"Negative: {negative_count}")
|
| 199 |
+
# st.write(f"Neutral: {neutral_count}")
|
| 200 |
+
st.write(f"Positive: {positive_count}")
|
| 201 |
+
|
| 202 |
+
# Plot sentiment distribution
|
| 203 |
+
sentiment_counts = pd.Series(np.array(sample_predictions)).value_counts()
|
| 204 |
+
desired_order = ['positive',
|
| 205 |
+
# 'neutral',
|
| 206 |
+
'negative']
|
| 207 |
+
sentiment_counts = sentiment_counts.reindex(desired_order, fill_value=0)
|
| 208 |
+
percentage_distribution = sentiment_counts / len(sample_predictions) * 100
|
| 209 |
+
|
| 210 |
+
st.write("### Sentiment Distribution")
|
| 211 |
+
fig, ax = plt.subplots(figsize=(8, 6))
|
| 212 |
+
bars = plt.bar(percentage_distribution.index, sentiment_counts.values, color=['green', 'orange', 'red'])
|
| 213 |
+
plt.xlabel('Sentiment')
|
| 214 |
+
plt.ylabel('Count')
|
| 215 |
+
plt.title('Sentiment Distribution in Sample Predictions')
|
| 216 |
+
plt.xticks(rotation=45)
|
| 217 |
+
for bar, percentage, des_order in zip(bars, percentage_distribution, desired_order):
|
| 218 |
+
height = bar.get_height()
|
| 219 |
+
ax.text(bar.get_x() + bar.get_width() / 2, height, f'{percentage:.2f}% {des_order.upper()}', ha='center', va='bottom')
|
| 220 |
+
st.pyplot(fig)
|
| 221 |
+
|
| 222 |
+
st.set_option('deprecation.showPyplotGlobalUse', False)
|
| 223 |
+
|
| 224 |
+
# Generate word clouds based on sentiment categories
|
| 225 |
+
sentiment_texts = {
|
| 226 |
+
'positive': [],
|
| 227 |
+
# 'neutral': [],
|
| 228 |
+
'negative': []
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
for text, sentiment in zip(Instructor_comment, sample_predictions):
|
| 232 |
+
sentiment_texts[sentiment].append(text)
|
| 233 |
+
|
| 234 |
+
text_for_llama = ""
|
| 235 |
+
|
| 236 |
+
for sentiment, texts in sentiment_texts.items():
|
| 237 |
+
combined_texts = ' '.join(texts)
|
| 238 |
+
combined_texts = combined_texts.split()
|
| 239 |
+
filtered_words = [word for word in combined_texts if len(word) > 2]
|
| 240 |
+
combined_texts = ' '.join(filtered_words)
|
| 241 |
+
if combined_texts =="": continue
|
| 242 |
+
# Load your custom TrueType font using PIL
|
| 243 |
+
font_path = "QuartzoBold-W9lv.ttf" # Replace with the path to your TTF font file
|
| 244 |
+
# custom_font = ImageFont.truetyp e(font_path) # Adjust the font size as needed
|
| 245 |
+
# Set the font family to use the TrueType font
|
| 246 |
+
# font = ImageFont.truetype(font_path)
|
| 247 |
+
|
| 248 |
+
wordcloud = WordCloud(font_path=font_path,width=800, height=600, background_color='white', max_words=15).generate(combined_texts)
|
| 249 |
+
st.write(f"### Word Cloud for {sentiment} Sentiment")
|
| 250 |
+
plt.figure(figsize=(10, 6))
|
| 251 |
+
plt.imshow(wordcloud, interpolation='bilinear')
|
| 252 |
+
plt.axis('off')
|
| 253 |
+
st.pyplot()
|
| 254 |
+
|
| 255 |
+
if sentiment == "negative":
|
| 256 |
+
# Extract the text from the word cloud object
|
| 257 |
+
generated_text = wordcloud.words_
|
| 258 |
+
|
| 259 |
+
# Print the generated text
|
| 260 |
+
for word, frequency in generated_text.items():
|
| 261 |
+
# print(f"{word}: {frequency}")
|
| 262 |
+
text_for_llama += str(word)+" "
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
# Generate a word cloud from all the text data
|
| 266 |
+
all_text = ' '.join(Instructor_comment)
|
| 267 |
+
all_text = all_text.split()
|
| 268 |
+
filtered_words = [word for word in all_text if len(word) > 3]
|
| 269 |
+
all_text = ' '.join(filtered_words)
|
| 270 |
+
|
| 271 |
+
st.write("### Word Cloud for All Sentiments")
|
| 272 |
+
wordcloud = WordCloud(font_path=font_path, width=800, height=800, background_color='white', max_words=200).generate(all_text)
|
| 273 |
+
plt.figure(figsize=(8, 8), facecolor=None)
|
| 274 |
+
plt.imshow(wordcloud)
|
| 275 |
+
plt.axis("off")
|
| 276 |
+
st.pyplot()
|
| 277 |
+
|
| 278 |
+
neg_comments = []
|
| 279 |
+
pos_comments = []
|
| 280 |
+
# Print the comments and predicted sentiments
|
| 281 |
+
for comment, sentiment in zip(Instructor_comment, predicted_sentiments):
|
| 282 |
+
if sentiment == "positive": pos_comments.append(comment)
|
| 283 |
+
else: neg_comments.append(comment)
|
| 284 |
+
|
| 285 |
+
text_for_llama = text_for_llama.split()
|
| 286 |
+
text_for_llama = ", ".join(text_for_llama)
|
| 287 |
+
text_for_llama = f"""
|
| 288 |
+
Based from these students' feedback: {str(text_for_llama)}. \n
|
| 289 |
+
Please generate a recommendation to the instructor. Make it in sentence type and in English language only.
|
| 290 |
+
Then give insights about the evaluation report based from different criteria.
|
| 291 |
+
Here is the results: {criteria_results}
|
| 292 |
+
Your response format-
|
| 293 |
+
Recommendation to Instructor:
|
| 294 |
+
Insights on Evaluation Report:
|
| 295 |
+
|
| 296 |
+
"""
|
| 297 |
+
|
| 298 |
+
prompt = text_for_llama
|
| 299 |
+
# # ================================================ replicate.com
|
| 300 |
+
# CUDA_LAUNCH_BLOCKING=1
|
| 301 |
+
# import replicate
|
| 302 |
+
# replicate = replicate.Client(api_token='r8_M9Dx8VYKkuTcw1o39d4Yw0HtpWFt4k239ebvW')
|
| 303 |
+
# output = replicate.run(
|
| 304 |
+
# # "meta/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
|
| 305 |
+
# "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
|
| 306 |
+
# input={"prompt": prompt}
|
| 307 |
+
# )
|
| 308 |
+
# # The meta/llama-2-70b-chat model can stream output as it's running.
|
| 309 |
+
# # The predict method returns an iterator, and you can iterate over that output.
|
| 310 |
+
# # ================================================
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
# st.title('Recommendation:')
|
| 314 |
+
# llama_output = ""
|
| 315 |
+
# with st.spinner("Generating Recommendation"):
|
| 316 |
+
# loading_text = st.empty()
|
| 317 |
+
# for item in reponse(prompt):
|
| 318 |
+
# llama_output +=item
|
| 319 |
+
# loading_text.write(llama_output)
|
| 320 |
+
# st.success("Generation Complete!")
|
| 321 |
+
|
| 322 |
+
st.title('Recommendation:')
|
| 323 |
+
llama_output = ""
|
| 324 |
+
with st.spinner("Generating Recommendation"):
|
| 325 |
+
st.write(llm_chain.run(prompt))
|
| 326 |
+
st.success("Generation Complete!")
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
|