diff --git a/HF_inference.cpython-310.pyc b/HF_inference.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3dd045b2b3dd5036622c2843464da08834bf5269
Binary files /dev/null and b/HF_inference.cpython-310.pyc differ
diff --git a/HF_inference.py b/HF_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..66b90ba1ed563913d108b58fd8f6a8e60c644d8c
--- /dev/null
+++ b/HF_inference.py
@@ -0,0 +1,92 @@
+# import requests
+# import time
+# import streamlit as st
+# import os
+
+
+# # SECRET_TOKEN
+# SECRET_TOKEN = os.getenv("HF_IBOA")
+
+# DISTILIBERT = "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-distilbert-base-uncased"
+# BERTLARGE = "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-large-uncased"
+# BERTBASE = "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-base-uncased"
+
+# headers = {"Authorization": SECRET_TOKEN}
+
+# # @st.cache_resource
+# @st.cache_resource(experimental_allow_widgets=True, show_spinner=False)
+# def query(payload, selected_model):
+# if selected_model == "DISTILIBERT MODEL":
+# API_URL = DISTILIBERT
+# elif selected_model == "BERT-LARGE MODEL":
+# API_URL = BERTLARGE
+# elif selected_model == "BERT-BASE MODEL":
+# API_URL = BERTBASE
+# else:
+# API_URL = DISTILIBERT
+
+# start_time = time.time()
+# counter = 0
+# with st.spinner("Processing..."):
+# while True:
+# response = requests.post(API_URL, headers=headers, json=payload)
+# # st.write(response)
+# if response.status_code == 200:
+
+# return response.json()
+# else:
+# time.sleep(1) # Wait for 1 second before retrying
+
+# def analyze_sintement(text, selected_model):
+# output = query({"inputs": text}, selected_model)
+# if output:
+# # st.success(f"Translation complete!")
+# return output[0][0]['label'], output[0][0]['score']
+# else:
+# st.warning("Error! Please try again.")
+
+
+
+import requests
+import time
+import streamlit as st
+import os
+
+# Define constants for API URLs
+# Maps the UI's model-selection labels to Hugging Face Inference API endpoints.
+MODEL_URLS = {
+    "DISTILIBERT MODEL": "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-distilbert-base-uncased",
+    "BERT-LARGE MODEL": "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-large-uncased",
+    "BERT-BASE MODEL": "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-base-uncased"
+}
+
+# SECRET_TOKEN
+# API token read from the environment; None if HF_IBOA is unset.
+SECRET_TOKEN = os.getenv("HF_IBOA")
+
+# Set headers
+# NOTE(review): the HF Inference API normally expects "Authorization: Bearer <token>".
+# This only works if HF_IBOA already contains the "Bearer " prefix — confirm.
+headers = {"Authorization": SECRET_TOKEN}
+
+# Define retry parameters
+MAX_RETRIES = 3
+RETRY_INTERVAL = 1 # in seconds
+
+@st.cache_resource(experimental_allow_widgets=True, show_spinner=False)
+def query(payload, selected_model):
+ # st.write(selected_model)
+ API_URL = MODEL_URLS.get(selected_model, MODEL_URLS[selected_model]) # Get API URL based on selected model
+
+ for retry in range(MAX_RETRIES):
+ response = requests.post(API_URL, headers=headers, json=payload)
+ if response.status_code == 200:
+ return response.json()
+ else:
+ time.sleep(RETRY_INTERVAL)
+
+ return None
+
+def analyze_sintement(text, selected_model):
+ output = query({"inputs": text}, selected_model)
+ if output:
+ return output[0][0]['label'], output[0][0]['score']
+ else:
+ st.warning("Error! Please try again.")
+ pass
\ No newline at end of file
diff --git a/__pycache__/HF_inference.cpython-310.pyc b/__pycache__/HF_inference.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3dd045b2b3dd5036622c2843464da08834bf5269
Binary files /dev/null and b/__pycache__/HF_inference.cpython-310.pyc differ
diff --git a/__pycache__/app5.cpython-310.pyc b/__pycache__/app5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34cb8bf1c02e830a0619fd09cbd3e03e86246e17
Binary files /dev/null and b/__pycache__/app5.cpython-310.pyc differ
diff --git a/app5.cpython-310.pyc b/app5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34cb8bf1c02e830a0619fd09cbd3e03e86246e17
Binary files /dev/null and b/app5.cpython-310.pyc differ
diff --git a/app5.py b/app5.py
new file mode 100644
index 0000000000000000000000000000000000000000..a33acb3e12b92b973bb468eb6bc2affe71a3bbf0
--- /dev/null
+++ b/app5.py
@@ -0,0 +1,204 @@
+import streamlit as st
+import time
+from app5_selectbox import academic_list, class_tbl, instructor, program, student, subject, subj_inst, evaluation, evaluation_fac
+from app5_selectbox.database_con import cursor, db_connection
+
+
+def student_login(username, password):
+ cursor.execute(f"SELECT s.stud_id, s.stud_name, s.class_id, s.user_type FROM student s WHERE s.stud_username='{username}' AND s.stud_password='{password}'")
+ return cursor.fetchone()
+
+def instructor_login(username, password):
+ cursor.execute(f"SELECT i.inst_id, i.inst_name, i.prog_id FROM instructor i WHERE i.inst_username='{username}' AND i.inst_password='{password}'")
+ return cursor.fetchone()
+
+
+def app5():
+    """Main Streamlit page: login form, then a role-specific dashboard.
+
+    Roles are tracked in st.session_state.user_type:
+    'student' -> evaluation form, 'faculty' -> faculty view,
+    'admin' -> table-maintenance panels.
+    """
+    st.title("Student-Faculty Evaluation")
+
+    if not hasattr(st.session_state, "logged_in") or not st.session_state.logged_in:
+        st.subheader("User Login")
+        username = st.text_input("Username")
+        password = st.text_input("Password", type="password")
+
+        if st.button("Login", type="primary"):
+            student_info = student_login(username, password)
+
+            if student_info:
+                st.success(f"Hello, {student_info[1]}! Login Successful")
+                st.session_state.logged_in = True
+                st.session_state.student_id = student_info[0]
+                st.session_state.class_id = student_info[2]
+                st.session_state.user_type = student_info[3]
+                time.sleep(1)
+                st.experimental_rerun()
+
+            # Not a student: fall back to instructor credentials.
+            elif not student_info:
+                instructor_info = instructor_login(username, password)
+                if instructor_info:
+                    st.success(f"Hello, {instructor_info[1]}! Login Successful")
+                    st.session_state.logged_in = True
+                    st.session_state.inst_id = instructor_info[0]
+                    st.session_state.inst_name = instructor_info[1]
+                    st.session_state.prog_id = instructor_info[2]
+                    st.session_state.user_type = 'faculty'
+                    time.sleep(1)
+                    st.experimental_rerun()
+                else:
+                    st.error("Invalid Credentials")
+
+            # NOTE(review): this branch is unreachable — the if/elif above
+            # already covers both truthy and falsy student_info.
+            else:
+                st.error("Invalid Credentials")
+    else:
+        if st.session_state.user_type == 'student':
+            # NOTE(review): session-state values are f-string-interpolated into
+            # SQL here and below — prefer parameterized ('?') queries.
+            cursor.execute(f"SELECT s.stud_name, c.class_year, c.class_section FROM student s JOIN class c ON s.class_id = c.class_id WHERE s.stud_id='{st.session_state.student_id}'")
+            student_info = cursor.fetchone()
+            student_name, class_year, class_section = student_info
+
+            st.subheader(f"Hello, {student_name} (Class Year: {class_year}, Section: {class_section}) - Student Evaluation")
+
+            # Subjects for this student's class that have no evaluation row yet
+            # (anti-join via LEFT JOIN ... WHERE e.stud_id IS NULL).
+            cursor.execute(f"""
+            SELECT si.subj_inst_id, si.sub_id_code, sub.sub_name, i.inst_name
+            FROM subj_inst si
+            LEFT JOIN evaluation e ON e.subj_inst_id = si.subj_inst_id AND e.stud_id = {st.session_state.student_id}
+            INNER JOIN subject sub ON sub.sub_id_code = si.sub_id_code
+            INNER JOIN instructor i ON i.inst_id = si.inst_id
+            WHERE e.stud_id IS NULL AND si.class_id = '{st.session_state.class_id}'
+            """)
+
+            subjects = cursor.fetchall()
+            subject_names = [f"{subject[2]} with Instructor: {subject[3]}" for subject in subjects]
+            if not subjects:
+                # Nothing left to evaluate: mark the student done and log out.
+                st.warning("You have evaluated all available subjects. Thank you!")
+                st.balloons()
+
+                progress_text = "logging-out . ..."
+                my_bar = st.progress(0, text=progress_text)
+                for percent_complete in range(100):
+                    time.sleep(0.01)
+                    my_bar.progress(percent_complete + 1, text=progress_text)
+
+                cursor.execute(f"UPDATE student SET is_eval='TRUE' WHERE stud_id = '{st.session_state.student_id}'")
+                db_connection.commit()
+                st.session_state.pop("logged_in", None)
+                st.session_state.pop("student_id", None)
+                st.session_state.pop("class_id", None)
+                st.experimental_rerun()
+
+            else:
+                selected_subject = st.selectbox("Select a Subject to Evaluate", subject_names)
+                selected_subject_id = None
+
+                # Map the selected display string back to its subj_inst_id.
+                for sel_subject in subjects:
+                    if f"{sel_subject[2]} with Instructor: {sel_subject[3]}" == selected_subject:
+                        selected_subject_id = sel_subject[0]
+
+                keys = {}
+                if selected_subject_id:
+                    st.write(f"You are evaluating the {selected_subject}.")
+                    criteria_list = [
+                        "Teaching Effectiveness",
+                        "Course Organization",
+                        "Accessibility and Communication",
+                        "Assessment and Grading",
+                        "Respect and Inclusivity",
+                        "Engagement and Interactivity",
+                        "Feedback and Improvement",
+                        "Accessibility of Learning Resources",
+                        "Passion and Enthusiasm",
+                        "Professionalism and Ethical Conduct",
+                    ]
+                    criteria = {}
+
+                    # One slider per criterion; widget keys are scoped to the
+                    # subject so switching subjects resets the sliders.
+                    for i in range(10):
+                        criteria_key = f"criteria_{i}_{selected_subject_id}"
+                        criteria_text = f"{criteria_list[i]} (1-5)"
+                        criteria[i] = st.slider(criteria_text, 1.00, 5.00, 1.00, step=0.05, key=criteria_key)
+                        keys[f"criteria_{i}"] = criteria_key
+
+                    feedback_comment_key = f"feedback_comment_{selected_subject_id}"
+                    feedback_comment = st.text_area("Feedback/Comments", key=feedback_comment_key)
+
+                    if st.button("Submit Evaluation"):
+                        if not feedback_comment:
+                            st.warning("Please provide feedback comments.")
+                        else:
+                            cursor.execute(f"SELECT si.inst_id FROM subj_inst si WHERE si.subj_inst_id = '{selected_subject_id}'")
+                            instructor_id = cursor.fetchone()
+
+                            if instructor_id:
+                                instructor_id = instructor_id[0]
+
+                                cursor.execute(f"""INSERT INTO evaluation (
+                                    stud_id,
+                                    subj_inst_id,
+                                    inst_id,
+                                    Teaching_Effectiveness,
+                                    Course_Organization,
+                                    Accessibility_and_Communication,
+                                    Assessment_and_Grading,
+                                    Respect_and_Inclusivity,
+                                    Engagement_and_Interactivity,
+                                    Feedback_and_Improvement,
+                                    Accessibility_of_Learning_Resources,
+                                    Passion_and_Enthusiasm,
+                                    Professionalism_and_Ethical_Conduct,
+                                    comments,
+                                    eval_timestamp)
+                                    VALUES ('{st.session_state.student_id}', '{selected_subject_id}', '{instructor_id}', '{criteria[0]}', '{criteria[1]}', '{criteria[2]}', '{criteria[3]}', '{criteria[4]}', '{criteria[5]}', '{criteria[6]}', '{criteria[7]}', '{criteria[8]}', '{criteria[9]}','{feedback_comment}', strftime('%Y-%m-%d %H:%M:%S','now'))""")
+                                db_connection.commit()
+
+                                with st.empty():
+                                    st.write("Submitting evaluation...")
+                                    time.sleep(0.3)
+                                    st.success("Evaluation submitted successfully")
+                                    time.sleep(0.4)
+
+                                feedback_comment = ""
+
+                                st.experimental_rerun()
+                else:
+                    # No subject resolved: clear the widget-key bookkeeping.
+                    for i in keys.keys():
+                        keys[i] = None
+                    feedback_comment = None
+
+                if st.button("Log Out"):
+                    st.session_state.pop("logged_in", None)
+                    st.session_state.pop("student_id", None)
+                    st.session_state.pop("class_id", None)
+                    st.experimental_rerun()
+        elif st.session_state.user_type == 'faculty':
+            evaluation_fac.evaluation()
+
+        elif st.session_state.user_type == 'admin':
+            # Admin: dispatch to the maintenance panel for the chosen table.
+            table_name = st.sidebar.selectbox("Select Table", ("academic_list", "class", "instructor", "program", "student", "subject", "subj_inst", "evaluation"))
+
+            if table_name == "academic_list":
+                academic_list.academic_list(table_name)
+            elif table_name == "class":
+                class_tbl.class_tbl(table_name)
+            elif table_name == "instructor":
+                instructor.instructor(table_name)
+            elif table_name == "program":
+                program.program(table_name)
+            elif table_name == "student":
+                student.student(table_name)
+            elif table_name == "subject":
+                subject.subject(table_name)
+            elif table_name == "subj_inst":
+                subj_inst.subj_inst(table_name)
+            elif table_name == "evaluation":
+                evaluation.evaluation()
+            else:
+                st.error("Select a valid table from the sidebar.")
+
+        if st.button("Log Out"):
+            st.session_state.pop("logged_in", None)
+            st.session_state.pop("student_id", None)
+            st.session_state.pop("class_id", None)
+            st.experimental_rerun()
+
+
+# Run the app only when this file is executed directly (not on import).
+if __name__ == "__main__":
+    app5()
diff --git a/app5_selectbox/QuartzoBold-W9lv.ttf b/app5_selectbox/QuartzoBold-W9lv.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..2fafbad1b1b9ad077a935d56de23adf0413a1410
Binary files /dev/null and b/app5_selectbox/QuartzoBold-W9lv.ttf differ
diff --git a/app5_selectbox/__pycache__/academic_list.cpython-310.pyc b/app5_selectbox/__pycache__/academic_list.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ab59f1d2b6d0b6d6e14f6f0ffb59d9ba1ad4c8e
Binary files /dev/null and b/app5_selectbox/__pycache__/academic_list.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/academic_list.cpython-39.pyc b/app5_selectbox/__pycache__/academic_list.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9fa37b5546d32bf3178351b68aeed40f28eeffd8
Binary files /dev/null and b/app5_selectbox/__pycache__/academic_list.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/app5_selectbox_func.cpython-310.pyc b/app5_selectbox/__pycache__/app5_selectbox_func.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5fae158e526c3b11dcadf01252df183b9ec8d5dd
Binary files /dev/null and b/app5_selectbox/__pycache__/app5_selectbox_func.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/app5_selectbox_func.cpython-39.pyc b/app5_selectbox/__pycache__/app5_selectbox_func.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26e7a7675d3b085a4ed1567fe19dbc827f891d60
Binary files /dev/null and b/app5_selectbox/__pycache__/app5_selectbox_func.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/class_tbl.cpython-310.pyc b/app5_selectbox/__pycache__/class_tbl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ebeb39882e02df8becf336924a0a50e11632a063
Binary files /dev/null and b/app5_selectbox/__pycache__/class_tbl.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/class_tbl.cpython-39.pyc b/app5_selectbox/__pycache__/class_tbl.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b6ba9d4a84fafe25e25f805420b4de0a6c35160b
Binary files /dev/null and b/app5_selectbox/__pycache__/class_tbl.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/database.cpython-39.pyc b/app5_selectbox/__pycache__/database.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80cf69267c36fa2ddafe67f0eeb8fdba8f82107b
Binary files /dev/null and b/app5_selectbox/__pycache__/database.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/database_con.cpython-310.pyc b/app5_selectbox/__pycache__/database_con.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4d6a6653dbd70299616d10f39355c8f17a7dc6d
Binary files /dev/null and b/app5_selectbox/__pycache__/database_con.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/database_con.cpython-39.pyc b/app5_selectbox/__pycache__/database_con.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3e3393879d514468862ffc927a2e008eaa4bd68e
Binary files /dev/null and b/app5_selectbox/__pycache__/database_con.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/db_con.cpython-39.pyc b/app5_selectbox/__pycache__/db_con.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..104095742d1f8f26b3597816fdba590d3c76fed3
Binary files /dev/null and b/app5_selectbox/__pycache__/db_con.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/df4_sentiment_analysis.cpython-39.pyc b/app5_selectbox/__pycache__/df4_sentiment_analysis.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..939872a1ea0f95f6ef70c0a0e649aefe681fa1d4
Binary files /dev/null and b/app5_selectbox/__pycache__/df4_sentiment_analysis.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/evaluation.cpython-310.pyc b/app5_selectbox/__pycache__/evaluation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91eeeff86fc632c80c49329ba0866f0e6fb2c70b
Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/evaluation.cpython-39.pyc b/app5_selectbox/__pycache__/evaluation.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c3fadbfb5bb456f4ec8aae946cacc343572fe17a
Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/evaluation_analysis.cpython-310.pyc b/app5_selectbox/__pycache__/evaluation_analysis.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08d3772fcf03b66842532d42f631cd6ad3b4df8c
Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation_analysis.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/evaluation_analysis.cpython-39.pyc b/app5_selectbox/__pycache__/evaluation_analysis.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8c1a2bbad9847abe2eae1db7a2aa52401c64354
Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation_analysis.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/evaluation_analysis_g4f.cpython-39.pyc b/app5_selectbox/__pycache__/evaluation_analysis_g4f.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..edadd89598b192129a7a4249f810a512f855e98d
Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation_analysis_g4f.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/evaluation_fac.cpython-310.pyc b/app5_selectbox/__pycache__/evaluation_fac.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3f3fd84e888f2b462ffecdf26d02b9d8e376303
Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation_fac.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/g4f_prompt.cpython-310.pyc b/app5_selectbox/__pycache__/g4f_prompt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..984e427a66751f41f313612c6f70932c446fca6e
Binary files /dev/null and b/app5_selectbox/__pycache__/g4f_prompt.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/g4f_prompt.cpython-39.pyc b/app5_selectbox/__pycache__/g4f_prompt.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64b47a59f874743afe0a8e345b8217ce4f4a69e8
Binary files /dev/null and b/app5_selectbox/__pycache__/g4f_prompt.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/instructor.cpython-310.pyc b/app5_selectbox/__pycache__/instructor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e071509cfc4977cc96e836ee1f75446615fd1f1e
Binary files /dev/null and b/app5_selectbox/__pycache__/instructor.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/instructor.cpython-39.pyc b/app5_selectbox/__pycache__/instructor.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d1eb7e16a326294e729e5087f4906e9590bd172
Binary files /dev/null and b/app5_selectbox/__pycache__/instructor.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/langchain_llama_gpu.cpython-39.pyc b/app5_selectbox/__pycache__/langchain_llama_gpu.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed7414086a3f01ebf785858198addd74ba922858
Binary files /dev/null and b/app5_selectbox/__pycache__/langchain_llama_gpu.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/llama2_prompt.cpython-310.pyc b/app5_selectbox/__pycache__/llama2_prompt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1a4c028ffb198c335fc89a7d1c50e0e69e38a8c
Binary files /dev/null and b/app5_selectbox/__pycache__/llama2_prompt.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/load_llama2.cpython-310.pyc b/app5_selectbox/__pycache__/load_llama2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e30cc23706910219b9df0d9de41c5ac6578e6a1b
Binary files /dev/null and b/app5_selectbox/__pycache__/load_llama2.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/naive_bayes_cl.cpython-310.pyc b/app5_selectbox/__pycache__/naive_bayes_cl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70d3c24d597ba706b8092c49c291ad0e47dfe38d
Binary files /dev/null and b/app5_selectbox/__pycache__/naive_bayes_cl.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/program.cpython-310.pyc b/app5_selectbox/__pycache__/program.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..176e4a3d69c8874a5dda3a32264e85a402ae2244
Binary files /dev/null and b/app5_selectbox/__pycache__/program.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/program.cpython-39.pyc b/app5_selectbox/__pycache__/program.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9369c6cf5603463793b16527f8c0dd941beae34
Binary files /dev/null and b/app5_selectbox/__pycache__/program.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/student.cpython-310.pyc b/app5_selectbox/__pycache__/student.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..89057e2a0d86fea11df4eb146c13fc8162f2f176
Binary files /dev/null and b/app5_selectbox/__pycache__/student.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/student.cpython-39.pyc b/app5_selectbox/__pycache__/student.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52e8c683edeb58ea01c7e236c5d4c96b7283dd2a
Binary files /dev/null and b/app5_selectbox/__pycache__/student.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/subj_inst.cpython-310.pyc b/app5_selectbox/__pycache__/subj_inst.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd0fc7b0b7d1f698e0919b52f38505ee8842a7f5
Binary files /dev/null and b/app5_selectbox/__pycache__/subj_inst.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/subj_inst.cpython-39.pyc b/app5_selectbox/__pycache__/subj_inst.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..00a76e49c867a7ba6eda53f24ded4a5cdb3bf9c7
Binary files /dev/null and b/app5_selectbox/__pycache__/subj_inst.cpython-39.pyc differ
diff --git a/app5_selectbox/__pycache__/subject.cpython-310.pyc b/app5_selectbox/__pycache__/subject.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e9bdd42df15c7c220c60483aa033d0aa9f97c04
Binary files /dev/null and b/app5_selectbox/__pycache__/subject.cpython-310.pyc differ
diff --git a/app5_selectbox/__pycache__/subject.cpython-39.pyc b/app5_selectbox/__pycache__/subject.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d0745f01864f89be62b8ba2bd29943aaf7041a0
Binary files /dev/null and b/app5_selectbox/__pycache__/subject.cpython-39.pyc differ
diff --git a/app5_selectbox/academic_list.py b/app5_selectbox/academic_list.py
new file mode 100644
index 0000000000000000000000000000000000000000..88d35afa04c8fcf0927223e73b8a66efe2fd15cf
--- /dev/null
+++ b/app5_selectbox/academic_list.py
@@ -0,0 +1,32 @@
+# academic_list.py
+import streamlit as st
+from app5_selectbox.database_con import cursor, db_connection #connect_to_database, execute_query
+from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4
+
+
+def academic_list(table_name):
+ # Include the academic_list-specific code here
+ acad_id = generate_unique_4(cursor, "acad_id", table_name)
+ acad_year = st.text_input("Academic Year", key="acad_year")
+ sem_num = st.selectbox("Semester Number", ("1", "2"), key="sem_num")
+
+ if st.button("Insert Academic List Record"):
+ # Check if the acad_year and sem_num are provided
+ if not acad_year or not sem_num:
+ st.error("Academic Year and Semester Number are required. Please provide values for both fields.")
+ else:
+ try:
+ # Check for duplicates in acad_year and sem_num
+ cursor.execute("SELECT acad_id FROM academic_list WHERE acad_year = %s AND sem_num = %s", (acad_year, sem_num))
+ duplicate = cursor.fetchone()
+ if duplicate is not None:
+ st.error("Duplicate entry found. Please provide unique Academic Year and Semester Number.")
+ else:
+ # Insert a record into the academic_list table
+ cursor.execute("INSERT INTO academic_list (acad_id, acad_year, sem_num) VALUES (%s, %s, %s)",
+ (acad_id, acad_year, sem_num))
+ db_connection.commit()
+ st.success("Record inserted successfully.")
+ except Exception as e:
+ st.error(f"An error occurred: {str(e)}")
+ display_table(cursor, table_name)
diff --git a/app5_selectbox/app5_selectbox_func.py b/app5_selectbox/app5_selectbox_func.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c77b41e23303c2fdba934ef3c6bfacce3dcc3c7
--- /dev/null
+++ b/app5_selectbox/app5_selectbox_func.py
@@ -0,0 +1,31 @@
+import random
+import streamlit as st
+import time
+import pandas as pd
+
+def generate_unique_4(cursor, col_id, tblname):
+ while True:
+ unique_id = random.randint(1000, 9999)
+ cursor.execute(f"SELECT {col_id} FROM {tblname} WHERE {col_id} = {unique_id}")
+ result = cursor.fetchone()
+ if result is None:
+ return unique_id
+
+def display_table(cursor, table_name):
+ try:
+ cursor.execute(f"pragma table_info('{table_name}')")
+ column_data = cursor.fetchall()
+ column_names = [column[1] for column in column_data]
+
+ cursor.execute(f"SELECT * FROM {table_name}")
+ data = cursor.fetchall()
+
+ if not data:
+ st.warning(f"No data found in the {table_name} table.")
+ else:
+ df = pd.DataFrame(data, columns=column_names)
+ st.header(f"{table_name} Table")
+ st.dataframe(df.style.set_properties(**{'text-align': 'center'}))
+
+ except Exception as e:
+ st.error(f"An error occurred while fetching data from {table_name}: {str(e)}")
diff --git a/app5_selectbox/class_tbl.py b/app5_selectbox/class_tbl.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1f0a218667591a0cbfe8b6b17048639a5cb2138
--- /dev/null
+++ b/app5_selectbox/class_tbl.py
@@ -0,0 +1,47 @@
+# class.py
+import streamlit as st
+import pandas as pd
+from app5_selectbox.database_con import cursor, db_connection
+from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4
+
+
+# In the display_table function, fetch and display prog_code
+def display_class_table(cursor, table_name):
+ if table_name == "class":
+ cursor.execute("SELECT class.class_id, class.prog_id, program.prog_code, class.class_year, class.class_section FROM class INNER JOIN program ON class.prog_id = program.prog_id")
+ data = cursor.fetchall()
+ column_names = [i[0] for i in cursor.description]
+ df = pd.DataFrame(data, columns=column_names)
+ st.dataframe(df.style.set_properties(**{'text-align': 'center'}))
+
+def class_tbl(table_name):
+ class_id = generate_unique_4(cursor, "class_id", table_name)
+
+ # Fetch available programs from the 'program' table
+ cursor.execute("SELECT prog_id, prog_name, prog_code FROM program")
+ available_programs = cursor.fetchall()
+ prog_id = st.selectbox("Program ID", available_programs, format_func=lambda row: f"{row[1]} ({row[2]})", key="prog_id")[0]
+ class_year = st.selectbox("Class Year", ("1", "2", "3", "4"), key="class_year")
+ class_section = st.text_input("Class Section", key="class_section", max_chars=1).upper()
+
+ if st.button("Insert Class Record"):
+ # Check if the class_year and class_section are provided
+ if not class_year or not class_section:
+ st.error("Class Year and Class Section are required. Please provide values for both fields.")
+ else:
+ try:
+ # Check for duplicates
+ cursor.execute("SELECT class_id FROM class WHERE prog_id = %s AND class_year = %s AND class_section = %s",
+ (prog_id, class_year, class_section))
+ result = cursor.fetchone()
+ if result is not None:
+ st.error("A record with the same Program ID, Class Year, and Class Section already exists.")
+ else:
+ # Insert a record into the class table
+ cursor.execute("INSERT INTO class (class_id, prog_id, class_year, class_section) VALUES (%s, %s, %s, %s)",
+ (class_id, prog_id, class_year, class_section))
+ db_connection.commit()
+ st.success("Record inserted successfully.")
+ except Exception as e:
+ st.error(f"An error occurred: {str(e)}")
+ display_class_table(cursor, table_name)
\ No newline at end of file
diff --git a/app5_selectbox/database_con.py b/app5_selectbox/database_con.py
new file mode 100644
index 0000000000000000000000000000000000000000..37ad00856279c07e9611b292915be53c37307738
--- /dev/null
+++ b/app5_selectbox/database_con.py
@@ -0,0 +1,18 @@
+# # database.py
+# import mysql.connector
+
+# # Connect to your MySQL database
+# db_connection = mysql.connector.connect(
+# host="localhost",
+# user="root",
+# password="",
+# database="university_evaluation_5"
+# )
+
+# cursor = db_connection.cursor()
+
+#### for sqlite connection ####
+
+import sqlite3
+db_connection = sqlite3.connect('/home/aibo/prototype_v1/prototype/database/data.sqlite', check_same_thread=False)
+cursor = db_connection.cursor()
diff --git a/app5_selectbox/df4_sentiment_analysis.py b/app5_selectbox/df4_sentiment_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b54fdf6a2c30d8ff5065e7edbb5e035fce129a6
--- /dev/null
+++ b/app5_selectbox/df4_sentiment_analysis.py
@@ -0,0 +1,60 @@
+import g4f
+import time
+
+
+def sentiment_func(message_list):
+ message_list=[
+ "Your lectures were so dull and uninspiring, I couldn't help but zone out",
+ "you have an extraordinary talent for leadership.",
+ "The instructor's indifference made it difficult to remain engaged or motivated",
+ "The lack of enthusiasm from the instructor made the class feel like a chore",
+ "Salamat sa iyong inspirasyon at dedikasyon sa aming edukasyon.",
+ "Sa bawat pagkakataon, lumalalim ang aming pag-unawa sa mga aralin.",
+ "Thanks for being dedicated to our education.",
+ "You show the societal importance of education.",
+ "The instructor's disinterested demeanor was reflected in the overall class atmosphere"
+ ]
+
+ message_list = '[label]\n'.join(message_list)
+ # print(message_list)
+ prompt = f"""
+ Please provide a single-word response per sentence.
+ label the following sentences if it is positive,negative
+ sentence list = {message_list}
+ your output is should in comma separated
+ example output : positive,negative,negative,positive
+ """
+ # Please provide a single-word response.
+
+ print(prompt)
+ while True:
+ try:
+ # streamed completion
+ response = g4f.ChatCompletion.create(
+ model="gpt-3.5-turbo",
+ # provider=g4f.Provider.GeekGpt,
+ provider=g4f.Provider.You,
+
+ # model="gpt-4",
+ # provider=g4f.Provider.Bing,
+
+ messages=[{"role": "user", "content": prompt}],
+ stream=True,
+ )
+ returned_output = ""
+ for message in response:
+ # print(message, flush=True, end='')
+ returned_output += message
+ # print(message)
+ returned_output = returned_output.split(',')
+ # Trim extra white spaces and convert to lowercase
+ returned_output = [item.strip().lower() for item in returned_output]
+ return returned_output
+ # print(returned_output)
+ break # Exit the loop if the chat completes successfully
+
+ except Exception as e:
+ # Handle the error (e.g., log it or take appropriate action)
+ # Sleep for a moment before retrying
+ print("error....",e)
+ time.sleep(0.4)
diff --git a/app5_selectbox/evaluation copy 2.py b/app5_selectbox/evaluation copy 2.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a976c54aef652f3558e5f56cd3fb983885beff5
--- /dev/null
+++ b/app5_selectbox/evaluation copy 2.py
@@ -0,0 +1,281 @@
+# evaluation.py
+import streamlit as st
+import pandas as pd
+from app5_selectbox.database_con import cursor, db_connection
+from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4
+from app5_selectbox.evaluation_analysis import eval_analysis
+
+import matplotlib.pyplot as plt
+import seaborn as sns
+import plotly.express as px
+import plotly.graph_objs as go
+
+
+
+# Function to perform analytics on instructors
def analyze_instructors(cursor):
    """Render instructor analytics (average criteria scores, per subject).

    Reads the full ``evaluation`` table, lets the user pick an instructor,
    and draws grouped plotly bar charts of average scores per criteria.

    Args:
        cursor: open DB-API cursor on the evaluations database.

    Returns:
        (selected_instructor, selected_instructor_comments, results_to_prompt)
        on success; implicitly None when the table is empty or an exception
        was caught.  NOTE(review): the caller indexes this result, so the
        None path raises TypeError downstream — confirm intended handling.
    """
    try:
        # Execute the SQL query to fetch the evaluation data
        cursor.execute("SELECT * FROM evaluation")
        evaluation_data = cursor.fetchall()

        if not evaluation_data:
            st.warning("No evaluation data found.")
        else:
            # Create a DataFrame from the fetched data and set column names
            # (underscores replaced with spaces for display).
            column_names = [i[0].replace("_", " ") for i in cursor.description]
            df = pd.DataFrame(evaluation_data, columns=column_names)

            # Get the column names for the score criteria.
            # NOTE(review): criteria_columns is computed but never used.
            criteria_columns = [f"score_criteria_{i}" for i in range(10)]
            # Columns 4..13 are assumed to be the ten score criteria —
            # TODO confirm against the evaluation table schema.
            column_names = [column[0].replace("_", " ") for column in cursor.description][4:14]
            # Define criteria labels globally.
            # NOTE(review): names were already de-underscored above, so this
            # .replace() is a no-op, and the same list is rebuilt again
            # right before plotting below.
            criteria_labels = [(f"{column_names[i]}", f"{column_names[i]}".replace("_", " ")) for i in range(10)]

            # Average score per criteria, grouped by instructor id.
            instructor_avg_scores = df.groupby("inst id")[column_names].mean().reset_index()

            # Join with instructor data to get display names.
            cursor.execute("SELECT inst_id, inst_name FROM instructor")
            instructor_data = cursor.fetchall()
            instructor_df = pd.DataFrame(instructor_data, columns=["inst id", "instructor name"])
            instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst id", how="left")

            selected_instructor = st.selectbox("Select Instructor", instructor_avg_scores["instructor name"].unique())

            # All evaluation rows for the chosen instructor.
            filtered_data = df[df["inst id"] == instructor_avg_scores[instructor_avg_scores["instructor name"] == selected_instructor]["inst id"].values[0]]

            selected_instructor_comments = list(filtered_data["comments"])
            st.subheader(f"Evaluated by: {len(selected_instructor_comments)} students")

            # Map subj_inst ids to human-readable subject names.
            cursor.execute("""
                SELECT subj_inst.subj_inst_id, subject.sub_name
                FROM subj_inst
                INNER JOIN subject
                ON subj_inst.sub_id_code = subject.sub_id_code
            """)

            subject_data = cursor.fetchall()
            subject_df = pd.DataFrame(subject_data, columns=["subj inst id", "sub name"])
            filtered_data = filtered_data.merge(subject_df, on="subj inst id", how="left")

            # Per-subject averages plus a combined "total average" column.
            subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()
            subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)

            # Grouped bar chart: one bar per criteria, grouped by subject.
            fig = go.Figure()

            criteria_labels = [(f"{column_names[i]}", f"{column_names[i]}".replace("_", " ")) for i in range(10)]
            for criterion, label in criteria_labels:
                fig.add_trace(go.Bar(
                    x=subject_avg_scores["sub name"],
                    y=subject_avg_scores[criterion],
                    name=label,
                ))

            # Add the total average score above the bars
            fig.add_trace(go.Scatter(
                x=subject_avg_scores["sub name"],
                y=subject_avg_scores["total average"],
                mode="markers+text",
                text=round(subject_avg_scores["total average"], 2),
                textposition="top center",
                textfont=dict(size=14),
                marker=dict(size=10, color="black"),
                name="Total Average",
            ))

            # Display the overall average of all subjects (via the x-axis title).
            overall_average = subject_avg_scores["total average"].mean()
            fig.update_layout(
                barmode="group",
                title=f"Average Scores per Criteria by Subject for Instructor: {selected_instructor}",
                xaxis_title=f"Overall Average Score (All Subjects): {overall_average:.2f}",
                yaxis_title="Average Score",
            )
            st.plotly_chart(fig)

            # Build a plain-text summary of per-criteria averages (later fed
            # to an LLM prompt by the caller).
            results_to_prompt = "Average score per Criteria\n"
            criteria_averages = []
            for criteria in filtered_data.columns[4:14]:
                # NOTE(review): sum(series / n) is just the mean written the
                # long way — equivalent to filtered_data[criteria].mean().
                average_score = round(sum(filtered_data[criteria] / len(filtered_data)), 2)
                criteria_averages.append((criteria, average_score))
                results_to_prompt += f"{criteria}: {average_score}/5, \n"

            # Bar chart of the per-criteria averages.
            fig = go.Figure()
            fig.add_trace(go.Bar(
                x=[criteria for criteria, _ in criteria_averages],
                y=[score for _, score in criteria_averages],
                text=[f"{score}/5" for _, score in criteria_averages],
            ))

            fig.update_layout(
                title="Average Score per Criteria",
                xaxis_title="Criteria",
                yaxis_title="Average Score",
            )

            st.plotly_chart(fig)

            # Per-subject drill-down: one chart per subject with the average
            # of every criteria for that subject only.
            for subject in subject_avg_scores["sub name"]:
                subject_filtered_data = filtered_data[filtered_data["sub name"] == subject]

                fig = go.Figure()
                st.write(subject_filtered_data)
                for criterion, label in criteria_labels:
                    fig.add_trace(go.Bar(
                        x=[label],
                        y=[subject_filtered_data[criterion].mean()],
                        text=[subject_filtered_data[criterion].mean()],
                        name=label,
                    ))

                # Calculate the "total average" based on criteria columns
                total_average = subject_filtered_data[column_names].mean(axis=1).mean()

                fig.update_layout(
                    barmode="group",
                    title=f"{subject} Average Score: {total_average:.2f}",
                    yaxis_title="Average Score",
                )
                st.plotly_chart(fig)

            return selected_instructor, selected_instructor_comments, results_to_prompt

    except Exception as e:
        st.error(f"An error occurred during data analytics: {str(e)}")
+
+
+ # try:
+ # # Execute the SQL query to fetch the evaluation data
+ # cursor.execute("SELECT * FROM evaluation")
+ # evaluation_data = cursor.fetchall()
+
+ # if not evaluation_data:
+ # st.warning("No evaluation data found.")
+ # else:
+ # # Create a DataFrame from the fetched data and set column names
+ # column_names = [i[0] for i in cursor.description]
+ # df = pd.DataFrame(evaluation_data, columns=column_names)
+
+ # # Group data by instructor and calculate average scores per criteria
+ # instructor_avg_scores = df.groupby("inst_id").agg({
+ # "score_criteria_1": "mean",
+ # "score_criteria_2": "mean",
+ # "score_criteria_3": "mean"
+ # }).reset_index()
+
+ # # Join with instructor data to get their names
+ # cursor.execute("SELECT inst_id, inst_name FROM instructor")
+ # instructor_data = cursor.fetchall()
+ # instructor_df = pd.DataFrame(instructor_data, columns=["inst_id", "instructor_name"])
+ # instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst_id", how="left")
+
+ # # Join with subj_inst and subject tables to get subject names
+ # cursor.execute("SELECT si.subj_inst_id, s.sub_name FROM subj_inst si INNER JOIN subject s ON si.sub_id_code = s.sub_id_code")
+ # subject_data = cursor.fetchall()
+ # subject_df = pd.DataFrame(subject_data, columns=["subj_inst_id", "sub_name"])
+ # df = df.merge(subject_df, on="subj_inst_id", how="left")
+
+ # # Create a select box to filter by instructor and subject
+ # selected_instructor = st.selectbox("Select Instructor", instructor_avg_scores["instructor_name"].unique())
+ # selected_subjects = df[df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor_name"] == selected_instructor]["inst_id"].values[0]]["sub_name"].unique()
+ # selected_subject = st.selectbox("Select Subject", selected_subjects)
+
+ # # Filter data based on the selected instructor and subject
+ # filtered_data = df[(df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor_name"] == selected_instructor]["inst_id"].values[0]) &
+ # (df["sub_name"] == selected_subject)]
+
+ # # Create a bar chart for average scores per criteria
+ # fig = px.bar(instructor_avg_scores, x="instructor_name",
+ # y=["score_criteria_1", "score_criteria_2", "score_criteria_3"],
+ # labels={"value": "Average Score", "variable": "Criteria"},
+ # title="Average Scores per Criteria by Instructor")
+ # st.plotly_chart(fig)
+
+ # # Group data by subject instructor and calculate average scores
+ # subject_avg_scores = filtered_data.groupby("sub_name").agg({
+ # "score_criteria_1": "mean",
+ # "score_criteria_2": "mean",
+ # "score_criteria_3": "mean"
+ # }).reset_index()
+
+ # # Create a bar chart for average scores per criteria for the selected subject
+ # fig = px.bar(subject_avg_scores, x="sub_name",
+ # y=["score_criteria_1", "score_criteria_2", "score_criteria_3"],
+ # labels={"value": "Average Score", "variable": "Criteria"},
+ # title=f"Average Scores per Criteria for Subject {selected_subject}")
+ # st.plotly_chart(fig)
+
+ # except Exception as e:
+ # st.error(f"An error occurred during data analytics: {str(e)}")
+
+
+
+
+
+
def evaluation(cursor, table_name):
    """Fetch all evaluation rows and drive the instructor-analytics UI.

    Args:
        cursor: open DB-API cursor on the evaluations database.
        table_name: display name of the table; currently only referenced by
            commented-out rendering code, kept for interface compatibility.
    """
    try:
        # Execute the SQL query to fetch the evaluation data
        cursor.execute("SELECT * FROM evaluation")
        evaluation_data = cursor.fetchall()

        if not evaluation_data:
            st.warning("No evaluation data found.")
        else:
            # Create a DataFrame from the fetched data and set column names
            column_names = [i[0] for i in cursor.description]
            df = pd.DataFrame(evaluation_data, columns=column_names)

            analyze_instructors_results = analyze_instructors(cursor)

            # Bug fix: analyze_instructors() returns None on its internal
            # error path; indexing None here previously raised TypeError
            # (masked by the broad except below).  Guard before unpacking.
            if analyze_instructors_results is not None and st.button("Analyze comments"):
                eval_analysis(
                    analyze_instructors_results[0],
                    analyze_instructors_results[1],
                    analyze_instructors_results[2],
                )

    except Exception as e:
        st.error(f"An error occurred while fetching evaluation data: {str(e)}")
diff --git a/app5_selectbox/evaluation copy.py b/app5_selectbox/evaluation copy.py
new file mode 100644
index 0000000000000000000000000000000000000000..341536db1914ca5a4e90df7666eb34bd9af0c417
--- /dev/null
+++ b/app5_selectbox/evaluation copy.py
@@ -0,0 +1,250 @@
+# evaluation.py
+import streamlit as st
+import pandas as pd
+from app5_selectbox.database_con import cursor, db_connection
+from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4
+from app5_selectbox.evaluation_analysis import eval_analysis
+
+import matplotlib.pyplot as plt
+import seaborn as sns
+import plotly.express as px
+import plotly.graph_objs as go
+
+
+
+# Function to perform analytics on instructors
def analyze_instructors(cursor):
    """Earlier revision of the instructor analytics view.

    Same data flow as the "copy 2" revision, but it stops after the grouped
    bar chart and returns only two values — no per-criteria summary text
    and no per-subject drill-down charts.

    Args:
        cursor: open DB-API cursor on the evaluations database.

    Returns:
        (selected_instructor, selected_instructor_comments) on success;
        implicitly None when the table is empty or an exception was caught.
        NOTE(review): the caller indexes the result, so the None path
        raises TypeError downstream — confirm intended handling.
    """
    try:
        # Execute the SQL query to fetch the evaluation data
        cursor.execute("SELECT * FROM evaluation")
        evaluation_data = cursor.fetchall()

        if not evaluation_data:
            st.warning("No evaluation data found.")
        else:
            # Create a DataFrame from the fetched data and set column names
            # (underscores replaced with spaces for display).
            column_names = [i[0].replace("_", " ") for i in cursor.description]
            df = pd.DataFrame(evaluation_data, columns=column_names)

            # NOTE(review): criteria_columns is computed but never used.
            criteria_columns = [f"score_criteria_{i}" for i in range(10)]

            # Columns 4..13 are assumed to be the ten score criteria —
            # TODO confirm against the evaluation table schema.
            column_names = [column[0].replace("_", " ") for column in cursor.description][4:14]

            # Average score per criteria, grouped by instructor id.
            instructor_avg_scores = df.groupby("inst id")[column_names].mean().reset_index()

            # Join with instructor data to get their names
            cursor.execute("SELECT inst_id, inst_name FROM instructor")
            instructor_data = cursor.fetchall()
            instructor_df = pd.DataFrame(instructor_data, columns=["inst id", "instructor name"])
            instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst id", how="left")

            # Create a select box to filter by instructor
            selected_instructor = st.selectbox("Select Instructor", instructor_avg_scores["instructor name"].unique())

            # Filter data based on the selected instructor
            filtered_data = df[df["inst id"] == instructor_avg_scores[instructor_avg_scores["instructor name"] == selected_instructor]["inst id"].values[0]]

            # All free-text comments left for this instructor.
            selected_instructor_comments = list(filtered_data["comments"])
            st.subheader(f"Evaluated by: {len(selected_instructor_comments)} students")

            # Join with the subj_inst and subject tables to get subject names
            cursor.execute("""
                SELECT subj_inst.subj_inst_id, subject.sub_name
                FROM subj_inst
                INNER JOIN subject
                ON subj_inst.sub_id_code = subject.sub_id_code
            """)

            subject_data = cursor.fetchall()
            subject_df = pd.DataFrame(subject_data, columns=["subj inst id", "sub name"])
            filtered_data = filtered_data.merge(subject_df, on="subj inst id", how="left")

            # Per-subject averages plus a combined "total average" column.
            subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()
            subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)

            # Create a grouped bar chart for average scores per criteria by subject
            fig = go.Figure()

            # NOTE(review): names were already de-underscored above, so this
            # .replace() is a no-op.
            criteria_labels = [(f"{column_names[i]}", f"{column_names[i]}".replace("_", " ")) for i in range(10)]
            for criterion, label in criteria_labels:
                fig.add_trace(go.Bar(
                    x=subject_avg_scores["sub name"],
                    y=subject_avg_scores[criterion],
                    name=label,
                ))

            # Add the total average score above the bars
            fig.add_trace(go.Scatter(
                x=subject_avg_scores["sub name"],
                y=subject_avg_scores["total average"],
                mode="markers+text",
                text=round(subject_avg_scores["total average"], 2),
                textposition="top center",
                textfont=dict(size=14),
                marker=dict(size=10, color="black"),
                name="Total Average",
            ))

            # Display the overall average of all subjects (via the x-axis title).
            overall_average = subject_avg_scores["total average"].mean()
            fig.update_layout(
                barmode="group",
                title=f"Average Scores per Criteria by Subject for Instructor: {selected_instructor}",
                xaxis_title=f"Overall Average Score (All Subjects): {overall_average:.2f}",
                yaxis_title="Average Score",
            )
            st.plotly_chart(fig)

            return selected_instructor, selected_instructor_comments

    except Exception as e:
        st.error(f"An error occurred during data analytics: {str(e)}")
+
+
+ # try:
+ # # Execute the SQL query to fetch the evaluation data
+ # cursor.execute("SELECT * FROM evaluation")
+ # evaluation_data = cursor.fetchall()
+
+ # if not evaluation_data:
+ # st.warning("No evaluation data found.")
+ # else:
+ # # Create a DataFrame from the fetched data and set column names
+ # column_names = [i[0] for i in cursor.description]
+ # df = pd.DataFrame(evaluation_data, columns=column_names)
+
+ # # Group data by instructor and calculate average scores per criteria
+ # instructor_avg_scores = df.groupby("inst_id").agg({
+ # "score_criteria_1": "mean",
+ # "score_criteria_2": "mean",
+ # "score_criteria_3": "mean"
+ # }).reset_index()
+
+ # # Join with instructor data to get their names
+ # cursor.execute("SELECT inst_id, inst_name FROM instructor")
+ # instructor_data = cursor.fetchall()
+ # instructor_df = pd.DataFrame(instructor_data, columns=["inst_id", "instructor_name"])
+ # instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst_id", how="left")
+
+ # # Join with subj_inst and subject tables to get subject names
+ # cursor.execute("SELECT si.subj_inst_id, s.sub_name FROM subj_inst si INNER JOIN subject s ON si.sub_id_code = s.sub_id_code")
+ # subject_data = cursor.fetchall()
+ # subject_df = pd.DataFrame(subject_data, columns=["subj_inst_id", "sub_name"])
+ # df = df.merge(subject_df, on="subj_inst_id", how="left")
+
+ # # Create a select box to filter by instructor and subject
+ # selected_instructor = st.selectbox("Select Instructor", instructor_avg_scores["instructor_name"].unique())
+ # selected_subjects = df[df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor_name"] == selected_instructor]["inst_id"].values[0]]["sub_name"].unique()
+ # selected_subject = st.selectbox("Select Subject", selected_subjects)
+
+ # # Filter data based on the selected instructor and subject
+ # filtered_data = df[(df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor_name"] == selected_instructor]["inst_id"].values[0]) &
+ # (df["sub_name"] == selected_subject)]
+
+ # # Create a bar chart for average scores per criteria
+ # fig = px.bar(instructor_avg_scores, x="instructor_name",
+ # y=["score_criteria_1", "score_criteria_2", "score_criteria_3"],
+ # labels={"value": "Average Score", "variable": "Criteria"},
+ # title="Average Scores per Criteria by Instructor")
+ # st.plotly_chart(fig)
+
+ # # Group data by subject instructor and calculate average scores
+ # subject_avg_scores = filtered_data.groupby("sub_name").agg({
+ # "score_criteria_1": "mean",
+ # "score_criteria_2": "mean",
+ # "score_criteria_3": "mean"
+ # }).reset_index()
+
+ # # Create a bar chart for average scores per criteria for the selected subject
+ # fig = px.bar(subject_avg_scores, x="sub_name",
+ # y=["score_criteria_1", "score_criteria_2", "score_criteria_3"],
+ # labels={"value": "Average Score", "variable": "Criteria"},
+ # title=f"Average Scores per Criteria for Subject {selected_subject}")
+ # st.plotly_chart(fig)
+
+ # except Exception as e:
+ # st.error(f"An error occurred during data analytics: {str(e)}")
+
+
+
+
+
+
def evaluation(cursor, table_name):
    """Fetch all evaluation rows and drive the instructor-analytics UI.

    Earlier revision: passes only (instructor, comments) to eval_analysis.

    Args:
        cursor: open DB-API cursor on the evaluations database.
        table_name: display name of the table; currently only referenced by
            commented-out rendering code, kept for interface compatibility.
    """
    try:
        # Execute the SQL query to fetch the evaluation data
        cursor.execute("SELECT * FROM evaluation")
        evaluation_data = cursor.fetchall()

        if not evaluation_data:
            st.warning("No evaluation data found.")
        else:
            # Create a DataFrame from the fetched data and set column names
            column_names = [i[0] for i in cursor.description]
            df = pd.DataFrame(evaluation_data, columns=column_names)

            analyze_instructors_results = analyze_instructors(cursor)

            # Bug fix: analyze_instructors() returns None on its internal
            # error path; indexing None here previously raised TypeError
            # (masked by the broad except below).  Guard before unpacking.
            if analyze_instructors_results is not None and st.button("Analyze comments"):
                eval_analysis(
                    analyze_instructors_results[0],
                    analyze_instructors_results[1],
                )

    except Exception as e:
        st.error(f"An error occurred while fetching evaluation data: {str(e)}")
diff --git a/app5_selectbox/evaluation.py b/app5_selectbox/evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..32034c44abc5cee00291a5721a61a6ca0f9ec15e
--- /dev/null
+++ b/app5_selectbox/evaluation.py
@@ -0,0 +1,412 @@
+import streamlit as st
+import pandas as pd
+import plotly.graph_objs as go
+import time
+import plotly.express as px
+import ast
+import numpy as np
+
+
+from app5_selectbox.database_con import cursor, db_connection
+from app5_selectbox.app5_selectbox_func import generate_unique_4
+from app5_selectbox.evaluation_analysis import eval_analysis
+# from app5_selectbox.evaluation_analysis_g4f import eval_analysis
+
+# from app5_selectbox.langchain_llama_gpu import llm_chain
+from app5_selectbox.g4f_prompt import g4f_prompt
+
+# Function to fetch evaluation data
def fetch_evaluation_data():
    """Load the full ``evaluation`` table into a DataFrame.

    Returns:
        A pandas DataFrame with one column per table column, or None
        (after showing a Streamlit warning) when the table is empty.
    """
    cursor.execute("SELECT * FROM evaluation")
    rows = cursor.fetchall()
    if not rows:
        st.warning("No evaluation data found.")
        return None
    headers = [descriptor[0] for descriptor in cursor.description]
    return pd.DataFrame(rows, columns=headers)
+
+# Function to analyze instructors
def analyze_instructors(evaluation_df):
    """Render the full analytics dashboard for one selected instructor.

    Flow: pick a program, pick an instructor within it, then draw
    (1) grouped per-subject criteria averages, (2) overall per-criteria
    averages plus an LLM-generated insight, (3) a pie chart of respondent
    sections, (4) a weighted-average summary table per class, and
    (5) per-subject drill-down charts each with an LLM recommendation.

    Uses the module-level ``cursor`` for all DB access in addition to the
    DataFrame parameter.

    Args:
        evaluation_df: DataFrame of the whole ``evaluation`` table, or None.

    Returns:
        (selected_instructor, selected_instructor_comments, results_to_prompt),
        or implicitly None when evaluation_df is None.
    """

    if evaluation_df is None:
        return

    # Columns 4..13 are assumed to be the ten score criteria — TODO confirm
    # against the evaluation table schema.
    column_names = evaluation_df.columns[4:14]
    criteria_labels = [column.replace("_", " ") for column in column_names]

    cursor.execute("SELECT * FROM instructor")
    instructor_data = cursor.fetchall()

    # NOTE(review): column list assumes the instructor table layout
    # (id, name, program, username, password) — verify against the schema.
    instructor_df = pd.DataFrame(instructor_data, columns=["inst_id", "instructor name", "program code", "user name", "password"])
    instructor_avg_scores = evaluation_df.groupby("inst_id")[column_names].mean().reset_index()
    instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst_id", how="left")

    # Fetch program options from the program table
    cursor.execute("SELECT prog_id, prog_code, prog_name FROM program")
    selected_program = pd.DataFrame(cursor.fetchall(), columns=["prog_id", "prog_code", "prog_name"])
    selected_program_select = st.selectbox("Select Program", selected_program["prog_code"])

    # Restrict the instructor list to the chosen program.  The instructor
    # table stores the program *id* in "program code", so translate the
    # selected prog_code back to its prog_id first.
    filtered_instructor_list = pd.DataFrame(instructor_avg_scores)
    mask = filtered_instructor_list["program code"] == selected_program.loc[selected_program['prog_code'] == selected_program_select, 'prog_id'].values[0]
    filtered_instructor_list = filtered_instructor_list.loc[mask]

    instructors_list = sorted(filtered_instructor_list["instructor name"].unique())

    selected_instructor = st.selectbox("Select Instructor", instructors_list)

    # All evaluation rows for the chosen instructor.
    filtered_data = evaluation_df[evaluation_df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor name"] == selected_instructor]["inst_id"].values[0]]

    selected_instructor_comments = list(filtered_data["comments"])
    st.write(filtered_data)
    st.subheader(f"Evaluated by: {len(selected_instructor_comments)} students")

    # Map subj_inst ids to human-readable subject names.
    cursor.execute("""
        SELECT subj_inst.subj_inst_id, subject.sub_name
        FROM subj_inst
        INNER JOIN subject
        ON subj_inst.sub_id_code = subject.sub_id_code
    """)

    # Convert all criteria columns to numeric data (non-numeric -> NaN).
    # NOTE(review): this mutates a slice of evaluation_df; pandas may emit
    # a SettingWithCopyWarning here.
    filtered_data[column_names] = filtered_data[column_names].apply(pd.to_numeric, errors='coerce')

    # Fetch subject data from the cursor
    subject_data = cursor.fetchall()

    # Create a DataFrame for subject data
    subject_df = pd.DataFrame(subject_data, columns=["subj_inst_id", "sub name"])

    # Merge subject data with filtered data based on 'subj_inst_id'
    filtered_data = filtered_data.merge(subject_df, on="subj_inst_id", how="left")

    # Group by subject name and calculate average scores, plus a combined
    # "total average" column.
    subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()
    subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)

    # NOTE(review): the two lines below recompute exactly the same thing as
    # the two lines above — one pair is redundant.
    subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()
    subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)

    # Grouped bar chart: one bar per criteria, grouped by subject.
    fig = go.Figure()

    for criterion, label in zip(column_names, criteria_labels):
        fig.add_trace(go.Bar(
            x=subject_avg_scores["sub name"],
            y=subject_avg_scores[criterion],
            name=label,
        ))

    # Add the total average score above the bars
    total_average = subject_avg_scores["total average"].mean()
    fig.add_trace(go.Scatter(
        x=subject_avg_scores["sub name"],
        y=subject_avg_scores["total average"],
        mode="markers+text",
        text=round(subject_avg_scores["total average"], 2),
        textposition="top center",
        textfont=dict(size=14),
        marker=dict(size=10, color="black"),
        name="Total Average",
    ))

    fig.update_layout(
        width=1000, height=600,
        barmode="group",
        title=f"Average Scores per Criteria by Subject for Instructor: {selected_instructor}",
        xaxis_title=f"Overall Average Score (All Subjects): {total_average:.2f}",
        yaxis_title="Average Score",
    )
    st.plotly_chart(fig)

    # Plain-text per-criteria summary, later embedded into the LLM prompt.
    results_to_prompt = "Average score per Criteria\n"
    criteria_averages = [(criteria.replace("_", " "), round(filtered_data[criteria].mean(), 2)) for criteria in column_names]
    for criteria, average_score in criteria_averages:
        results_to_prompt += f"{criteria}: {average_score}/5, \n"

    # Bar chart of the overall per-criteria averages.
    fig = go.Figure()
    fig.add_trace(go.Bar(
        x=[criteria for criteria, _ in criteria_averages],
        y=[average_score for _, average_score in criteria_averages],
        text=[f"{average_score}/5" for _, average_score in criteria_averages],
    ))

    fig.update_layout(
        width=1000,
        title="Average Score per Criteria",
        xaxis_title="Criteria",
        yaxis_title="Average Score",
    )

    st.plotly_chart(fig)
    results_to_prompt = f"""
    Based from these over-all average score please Analyze it and provide short insights: {str(results_to_prompt)}.
    Make it in sentence type and in English language only.

    """
    # Retry the LLM call until it succeeds.
    # NOTE(review): a persistent failure makes this an infinite busy loop —
    # the except swallows everything, the sleep is commented out, and there
    # is no retry cap.  Consider bounding retries / restoring the sleep.
    while True:
        try:
            with st.spinner("Analyzing... "):
                st.write(g4f_prompt(results_to_prompt))
                st.success("Analyzing Complete!")
                break

        except Exception as e:
            pass
            # time.sleep(0.4)

    # Pie chart: evaluation distribution per student's section.
    # NOTE(review): interpolating {tuple(...)} into SQL breaks when exactly
    # one student id is present — the rendered "(x,)" trailing comma is not
    # valid SQL — and string-built queries are injection-prone.  Prefer a
    # parameterized IN clause.
    cursor.execute(f"""
        SELECT
            pr.prog_code || '-' || c.class_year || '-' || c.class_section AS merged_result,
            COUNT(*) AS occurrence_count
        FROM
            student s
        JOIN
            class c ON s.class_id = c.class_id
        JOIN
            program pr ON c.prog_id = pr.prog_id
        WHERE
            s.stud_id IN {tuple(list(filtered_data["stud_id"]))}
        GROUP BY
            s.class_id, pr.prog_code, c.class_year, c.class_section;

    """)

    merged_result = pd.DataFrame(cursor.fetchall(), columns=["merged_result", "occurrence_count"])
    st.write(filtered_data)
    st.write(merged_result)

    fig = px.pie(
        merged_result,
        values="occurrence_count",
        names="merged_result",
        title="Evaluation Distribution per Student's Section",
    )

    # Add percentage and occurrence_count to the hover information.
    # NOTE(review): the source for this string was mangled in transit (it
    # contained a raw line break); "<br>" is plotly's hovertemplate line
    # break and is presumably what was intended — confirm against history.
    fig.update_traces(
        hovertemplate="%{label}: %{percent}<br>Occurrence Count: %{value}",
        textinfo="percent+value",
    )

    fig.update_layout(
        width=600,
        height=600,
        font=dict(size=20),
    )
    st.plotly_chart(fig)

    # Weighted-average summary per class for this instructor's respondents.
    # Same single-element-tuple / injection caveat as the query above.
    cursor.execute(f"""
        SELECT
            s.class_id,
            pr.prog_code || '-' || c.class_year || '-' || c.class_section AS class_info,
            COUNT(DISTINCT s.stud_id) AS num_respondents,
            ROUND((AVG(Teaching_Effectiveness) + AVG(Course_Organization) + AVG(Accessibility_and_Communication) +
                   AVG(Assessment_and_Grading) + AVG(Respect_and_Inclusivity) + AVG(Engagement_and_Interactivity) +
                   AVG(Feedback_and_Improvement) + AVG(Accessibility_of_Learning_Resources) +
                   AVG(Passion_and_Enthusiasm) + AVG(Professionalism_and_Ethical_Conduct)) / 10, 2) AS avg_overall,
            ROUND((COUNT(DISTINCT s.stud_id) * (AVG(Teaching_Effectiveness) + AVG(Course_Organization) + AVG(Accessibility_and_Communication) +
                   AVG(Assessment_and_Grading) + AVG(Respect_and_Inclusivity) + AVG(Engagement_and_Interactivity) +
                   AVG(Feedback_and_Improvement) + AVG(Accessibility_of_Learning_Resources) +
                   AVG(Passion_and_Enthusiasm) + AVG(Professionalism_and_Ethical_Conduct)) / 10), 2) AS weighted_avg_overall
        FROM
            evaluation e
        JOIN
            student s ON e.stud_id = s.stud_id
        JOIN
            class c ON s.class_id = c.class_id
        JOIN
            program pr ON c.prog_id = pr.prog_id
        WHERE
            s.stud_id IN {tuple(list(filtered_data["stud_id"]))}
        GROUP BY
            s.class_id, pr.prog_code, c.class_year, c.class_section, class_info;
    """)

    avg_scores_per_class = pd.DataFrame(cursor.fetchall(), columns=[
        "class_id",
        "class_info",
        "num_respondents",
        "avg_overall",
        "weighted_avg_overall"
    ])

    # Calculate the last row's weighted_avg_overall / num_respondents.
    # NOTE(review): this adjustment is immediately overwritten by the
    # recomputation below — dead code unless that line is removed.
    last_row_index = avg_scores_per_class["weighted_avg_overall"].last_valid_index()
    if last_row_index is not None:
        avg_scores_per_class.at[last_row_index, "weighted_avg_overall"] /= avg_scores_per_class.at[last_row_index, "num_respondents"]

    # Recompute the weighted average in pandas (respondents * class average),
    # replacing the SQL-provided column.
    avg_scores_per_class["weighted_avg_overall"] = avg_scores_per_class["num_respondents"] * avg_scores_per_class["avg_overall"]

    # Drop rows with None values
    avg_scores_per_class = avg_scores_per_class.dropna()

    # Calculate the overall averages for avg_overall and weighted_avg_overall
    num_respondents = round(avg_scores_per_class["num_respondents"].sum(), 2)
    overall_avg_overall = round(avg_scores_per_class["avg_overall"].mean(), 2)
    overall_weighted_avg_overall = round(avg_scores_per_class["weighted_avg_overall"].sum(), 2)
    weighted_avg_overall = round(overall_weighted_avg_overall / num_respondents, 2)

    # Add summary rows to the DataFrame.
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — these calls
    # require pandas < 2.0 or migration to pd.concat.
    avg_scores_per_class = avg_scores_per_class.append({
        "class_id": "",
        "class_info": "Summary",
        "num_respondents": num_respondents,
        "avg_overall": " ",
        "weighted_avg_overall": overall_weighted_avg_overall
    }, ignore_index=True)

    def calculate_satisfaction(weighted_avg_overall):
        # Map a 0-5 weighted average to a qualitative rating label.
        if weighted_avg_overall > 4:
            return "Outstanding"
        elif weighted_avg_overall > 3:
            return "Above Average"
        elif weighted_avg_overall > 2:
            return "Average"
        elif weighted_avg_overall > 1:
            return "Below Average"
        else:
            return "Unsatisfactory"

    def highlight_cell(col, col_label, row_label):
        # Styler callback: highlight the (row_label, col_label) cell only.
        # check if col is a column we want to highlight
        if col.name == col_label:
            # a boolean mask where True represents a row we want to highlight
            mask = (col.index == row_label)
            # return an array of string styles (e.g. ["", "background-color: yellow"])
            return ["background-color: lightgreen" if val_bool else "" for val_bool in mask]
        else:
            # return an array of empty strings that has the same size as col
            return np.full_like(col, "", dtype="str")

    avg_scores_per_class = avg_scores_per_class.append({
        "class_id": "",
        "class_info": "Weighted Avg.",
        "num_respondents": " ",
        "avg_overall": calculate_satisfaction(weighted_avg_overall),
        "weighted_avg_overall": weighted_avg_overall
    }, ignore_index=True)

    # NOTE(review): row_label=9 hard-codes the highlighted row index; it only
    # matches the "Weighted Avg." row for a specific number of classes.
    avg_scores_per_class = avg_scores_per_class.style.apply(highlight_cell, col_label="avg_overall", row_label=9)

    st.write(avg_scores_per_class)
    st.write(f"Number of respondents: {num_respondents}")
    st.write(f"Overall weighted avg.: {overall_weighted_avg_overall}")
    st.write(f"Weighted avg overall: {weighted_avg_overall}")

    # Per-subject drill-down: one expander per subject with its own chart
    # and an LLM-generated recommendation.
    for subject in subject_avg_scores["sub name"]:
        with st.expander(subject):
            subject_filtered_data = filtered_data[filtered_data["sub name"] == subject]
            promt_txt = ""
            fig = go.Figure()

            for criterion, label in zip(column_names, criteria_labels):
                text = round(subject_filtered_data[criterion].mean(), 2)
                fig.add_trace(go.Bar(
                    x=[label],
                    y=[text],
                    text=text,
                    name=label,
                ))
                promt_txt += criterion.replace("_", " ") + ": " + str(text) + "\n"

            total_average = subject_filtered_data[column_names].mean(axis=1).mean()

            total_average_txt = f"{subject} Average Score: {round(total_average,2)}/5"
            fig.update_layout(
                barmode="group",
                width=1000,
                title=total_average_txt,
                yaxis_title="Average Score",
            )
            st.plotly_chart(fig)

            prompt = f"generate a very short insights about this faculty evaluation result for the subject {subject}?\n{promt_txt}\nplease strictly shorten your response in sentence format"
            # Same unbounded-retry caveat as the earlier g4f loop above.
            while True:
                with st.spinner("Generating Recommendation"):
                    try:
                        st.write(g4f_prompt(prompt))
                        break
                    except Exception as e:
                        pass
                        # time.sleep(0.4)

    return selected_instructor, selected_instructor_comments, results_to_prompt
+
def evaluation():
    """Entry point: fetch the evaluation table and render all analytics.

    Any error raised by the analytics pipeline is caught and surfaced as a
    Streamlit error box rather than crashing the app.
    """
    try:
        evaluation_df = fetch_evaluation_data()
        if evaluation_df is not None:
            analyze_instructors_results = analyze_instructors(evaluation_df)

            # Bug fix: analyze_instructors() returns None on its guard path;
            # indexing None below previously raised TypeError (masked by the
            # broad except).  Guard before unpacking.
            if analyze_instructors_results is not None:
                with st.expander("Sentiment Analysis"):
                    eval_analysis(
                        analyze_instructors_results[0],
                        analyze_instructors_results[1],
                        analyze_instructors_results[2],
                    )

    except Exception as e:
        st.error(f"An error occurred: {str(e)}")
diff --git a/app5_selectbox/evaluation_analysis copy 2.py b/app5_selectbox/evaluation_analysis copy 2.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5557f9b873dc924a289e43dfb897ab07e9c28ee
--- /dev/null
+++ b/app5_selectbox/evaluation_analysis copy 2.py
@@ -0,0 +1,378 @@
+import gspread
+import pandas as pd
+from oauth2client.service_account import ServiceAccountCredentials
+from transformers import BertForSequenceClassification, BertTokenizer
+import torch
+import streamlit as st
+from matplotlib import pyplot as plt
+import numpy as np
+from wordcloud import WordCloud
+from PIL import ImageFont
+# from app5_selectbox.langchain_llama_gpu import llm_chain
+from app5_selectbox.g4f_prompt import g4f_prompt
+
+# # Load the model and tokenizer
+# model = BertForSequenceClassification.from_pretrained("./sentiment_model")
+# tokenizer = BertTokenizer.from_pretrained("./sentiment_model")
+
def eval_analysis(Instructor, Instructor_comment, criteria_results):
    """Classify instructor comments with a fine-tuned BERT model and render a
    Streamlit sentiment dashboard (counts, distribution chart, word clouds),
    then build an email-generation prompt from the negative-feedback keywords.

    Args:
        Instructor: instructor identifier; interpolated into the LLM prompt.
        Instructor_comment: iterable of free-text student comments.
        criteria_results: per-criterion scores. NOTE(review): currently unused
            in this variant (only referenced by previously commented-out prompt
            templates); kept for interface compatibility with callers.

    Side effects: writes to the Streamlit page and stdout; loads the model from
    the local ``sentiment_model`` directory.
    """
    # --- Binary sentiment classification (negative / positive) ---------------
    # Uses the module-level transformers/torch/numpy imports; the previous
    # in-function duplicate imports were removed.
    loaded_model = BertForSequenceClassification.from_pretrained('sentiment_model')
    tokenizerr = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)

    sample_encodings = tokenizerr(list(Instructor_comment), truncation=True,
                                  padding=True, max_length=128, return_tensors='pt')
    with torch.no_grad():
        sample_outputs = loaded_model(sample_encodings['input_ids'],
                                      attention_mask=sample_encodings['attention_mask'])

    sentiment_labels = ['negative', 'positive']
    predicted_sentiments = [sentiment_labels[label]
                            for label in np.argmax(sample_outputs.logits, axis=1)]

    # Tally per-class counts. The model is binary, so 'neutral' always stays 0;
    # the counter is kept only to mirror the original console output.
    negative_count = 0
    neutral_count = 0
    positive_count = 0
    for text, prediction in zip(Instructor_comment, predicted_sentiments):
        print(f"Text: {text}")
        print(f"Predicted Sentiment: {prediction}")
        if prediction == "negative":
            negative_count += 1
        elif prediction == "neutral":
            neutral_count += 1
        else:
            positive_count += 1
    print(f'negative_count {negative_count}')
    print(f'neutral_count {neutral_count}')
    print(f'positive_count {positive_count}')

    # --- Dashboard ------------------------------------------------------------
    st.title("Sentiment Analysis Dashboard")
    st.sidebar.header("Settings")

    link_text = "Instructor Survey"
    link_url = "https://forms.gle/64n9CXMDRP2NYgZYA"
    st.sidebar.markdown(f"[{link_text}]({link_url})")

    st.write("### Sentiment Counts")
    st.write(f"Negative: {negative_count}")
    st.write(f"Positive: {positive_count}")

    # Bar chart of the class distribution, annotated with percentages.
    sentiment_counts = pd.Series(np.array(predicted_sentiments)).value_counts()
    desired_order = ['positive', 'negative']
    sentiment_counts = sentiment_counts.reindex(desired_order, fill_value=0)
    percentage_distribution = sentiment_counts / len(predicted_sentiments) * 100

    st.write("### Sentiment Distribution")
    fig, ax = plt.subplots(figsize=(8, 6))
    # NOTE(review): three colors for two bars — matplotlib ignores the extra one.
    bars = plt.bar(percentage_distribution.index, sentiment_counts.values,
                   color=['green', 'orange', 'red'])
    plt.xlabel('Sentiment')
    plt.ylabel('Count')
    plt.title('Sentiment Distribution in Sample Predictions')
    plt.xticks(rotation=45)
    for bar, percentage, des_order in zip(bars, percentage_distribution, desired_order):
        height = bar.get_height()
        ax.text(bar.get_x() + bar.get_width() / 2, height,
                f'{percentage:.2f}% {des_order.upper()}', ha='center', va='bottom')
    st.pyplot(fig)

    st.set_option('deprecation.showPyplotGlobalUse', False)

    # --- Word clouds ----------------------------------------------------------
    sentiment_texts = {'positive': [], 'negative': []}
    for text, sentiment in zip(Instructor_comment, predicted_sentiments):
        sentiment_texts[sentiment].append(text)

    # Hoisted out of the loop: previously assigned after a `continue`, so the
    # "All Sentiments" cloud below could raise NameError when all buckets were
    # empty or filtered out.
    font_path = "QuartzoBold-W9lv.ttf"  # Replace with the path to your TTF font file

    text_for_llama = ""
    for sentiment, texts in sentiment_texts.items():
        # Drop very short tokens (<= 2 chars) before building the cloud.
        combined_texts = ' '.join(w for w in ' '.join(texts).split() if len(w) > 2)
        if combined_texts == "":
            continue
        wordcloud = WordCloud(font_path=font_path, width=800, height=600,
                              background_color='white', max_words=15).generate(combined_texts)
        st.write(f"### Word Cloud for {sentiment} Sentiment")
        plt.figure(figsize=(10, 6))
        plt.imshow(wordcloud, interpolation='bilinear')
        plt.axis('off')
        st.pyplot()

        # Collect the negative-cloud keywords as seed material for the prompt.
        if sentiment == "negative":
            for word in wordcloud.words_:
                text_for_llama += str(word) + " "

    # Word cloud over every comment regardless of sentiment (tokens > 3 chars).
    all_text = ' '.join(w for w in ' '.join(Instructor_comment).split() if len(w) > 3)
    st.write("### Word Cloud for All Sentiments")
    wordcloud = WordCloud(font_path=font_path, width=800, height=800,
                          background_color='white', max_words=200).generate(all_text)
    plt.figure(figsize=(8, 8), facecolor=None)
    plt.imshow(wordcloud)
    plt.axis("off")
    st.pyplot()

    # --- Prompt construction --------------------------------------------------
    # No negative keywords -> gratitude email; otherwise a recommendations email
    # seeded with the negative-cloud keywords.
    if text_for_llama == "":
        st.title("Expressing Gratitude and Dedication")
        prompt = f"""
        There's no negative feedback/comments to the instructor, give him/her short email to say.
        [Your Name] = The Management
        [Instructor's Name] = {Instructor}
        """
    else:
        st.title('Recommendation:')
        keywords = ", ".join(text_for_llama.split())
        prompt = f"""
        Based from these students' feedback: {keywords}. \n
        Please generate a short email to teh instructor having 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only.
        define the best email subject based from the recomendation
        [Your Name] = The Management
        [Instructor's Name] = {Instructor}

        """

    with st.spinner("Generating...."):
        # Generation hook is currently disabled; re-enable with e.g.:
        #   st.write(g4f_prompt(prompt))
        # The previous `while True: try/except Exception: pass` retry loop was
        # removed: it busy-spun forever and silently swallowed every exception,
        # while the only live statement in its try-body (st.success) cannot raise.
        # (Also removed: dead commented-out code containing a hardcoded
        # replicate.com API token — rotate that credential.)
        st.success("Generation Complete!")
diff --git a/app5_selectbox/evaluation_analysis copy.py b/app5_selectbox/evaluation_analysis copy.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7cbf0aa42ea9e6e4cfb5885c0d65e026650a3f0
--- /dev/null
+++ b/app5_selectbox/evaluation_analysis copy.py
@@ -0,0 +1,330 @@
+import gspread
+import pandas as pd
+from oauth2client.service_account import ServiceAccountCredentials
+from transformers import BertForSequenceClassification, BertTokenizer
+import torch
+import streamlit as st
+from matplotlib import pyplot as plt
+import numpy as np
+from wordcloud import WordCloud
+from PIL import ImageFont
+from app5_selectbox.langchain_llama_gpu import llm_chain
+
+# # Load the model and tokenizer
+# model = BertForSequenceClassification.from_pretrained("./sentiment_model")
+# tokenizer = BertTokenizer.from_pretrained("./sentiment_model")
+
def eval_analysis(Instructor, Instructor_comment, criteria_results):
    """Classify instructor comments with a fine-tuned BERT model, render a
    Streamlit sentiment dashboard (counts, distribution chart, word clouds),
    and generate a recommendation via the local ``llm_chain``.

    Args:
        Instructor: instructor identifier (unused in the final prompt of this
            variant but kept for interface compatibility).
        Instructor_comment: iterable of free-text student comments.
        criteria_results: per-criterion scores, interpolated into the prompt.

    Side effects: writes to the Streamlit page and stdout; loads the model from
    the local ``sentiment_model`` directory; runs ``llm_chain``.
    """
    # --- Binary sentiment classification (negative / positive) ---------------
    # Uses the module-level transformers/torch/numpy imports; the previous
    # in-function duplicate imports were removed.
    loaded_model = BertForSequenceClassification.from_pretrained('sentiment_model')
    tokenizerr = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)

    sample_encodings = tokenizerr(list(Instructor_comment), truncation=True,
                                  padding=True, max_length=128, return_tensors='pt')
    with torch.no_grad():
        sample_outputs = loaded_model(sample_encodings['input_ids'],
                                      attention_mask=sample_encodings['attention_mask'])

    sentiment_labels = ['negative', 'positive']
    predicted_sentiments = [sentiment_labels[label]
                            for label in np.argmax(sample_outputs.logits, axis=1)]

    # Tally per-class counts. The model is binary, so 'neutral' always stays 0;
    # the counter is kept only to mirror the original console output.
    negative_count = 0
    neutral_count = 0
    positive_count = 0
    for text, prediction in zip(Instructor_comment, predicted_sentiments):
        print(f"Text: {text}")
        print(f"Predicted Sentiment: {prediction}")
        if prediction == "negative":
            negative_count += 1
        elif prediction == "neutral":
            neutral_count += 1
        else:
            positive_count += 1
    print(f'negative_count {negative_count}')
    print(f'neutral_count {neutral_count}')
    print(f'positive_count {positive_count}')

    # --- Dashboard ------------------------------------------------------------
    st.title("Sentiment Analysis Dashboard")
    st.sidebar.header("Settings")

    link_text = "Instructor Survey"
    link_url = "https://forms.gle/64n9CXMDRP2NYgZYA"
    st.sidebar.markdown(f"[{link_text}]({link_url})")

    st.write("### Sentiment Counts")
    st.write(f"Negative: {negative_count}")
    st.write(f"Positive: {positive_count}")

    # Bar chart of the class distribution, annotated with percentages.
    sentiment_counts = pd.Series(np.array(predicted_sentiments)).value_counts()
    desired_order = ['positive', 'negative']
    sentiment_counts = sentiment_counts.reindex(desired_order, fill_value=0)
    percentage_distribution = sentiment_counts / len(predicted_sentiments) * 100

    st.write("### Sentiment Distribution")
    fig, ax = plt.subplots(figsize=(8, 6))
    # NOTE(review): three colors for two bars — matplotlib ignores the extra one.
    bars = plt.bar(percentage_distribution.index, sentiment_counts.values,
                   color=['green', 'orange', 'red'])
    plt.xlabel('Sentiment')
    plt.ylabel('Count')
    plt.title('Sentiment Distribution in Sample Predictions')
    plt.xticks(rotation=45)
    for bar, percentage, des_order in zip(bars, percentage_distribution, desired_order):
        height = bar.get_height()
        ax.text(bar.get_x() + bar.get_width() / 2, height,
                f'{percentage:.2f}% {des_order.upper()}', ha='center', va='bottom')
    st.pyplot(fig)

    st.set_option('deprecation.showPyplotGlobalUse', False)

    # --- Word clouds ----------------------------------------------------------
    sentiment_texts = {'positive': [], 'negative': []}
    for text, sentiment in zip(Instructor_comment, predicted_sentiments):
        sentiment_texts[sentiment].append(text)

    # Hoisted out of the loop: previously assigned after a `continue`, so the
    # "All Sentiments" cloud below could raise NameError when all buckets were
    # empty or filtered out.
    font_path = "QuartzoBold-W9lv.ttf"  # Replace with the path to your TTF font file

    text_for_llama = ""
    for sentiment, texts in sentiment_texts.items():
        # Drop very short tokens (<= 2 chars) before building the cloud.
        combined_texts = ' '.join(w for w in ' '.join(texts).split() if len(w) > 2)
        if combined_texts == "":
            continue
        wordcloud = WordCloud(font_path=font_path, width=800, height=600,
                              background_color='white', max_words=15).generate(combined_texts)
        st.write(f"### Word Cloud for {sentiment} Sentiment")
        plt.figure(figsize=(10, 6))
        plt.imshow(wordcloud, interpolation='bilinear')
        plt.axis('off')
        st.pyplot()

        # Collect the negative-cloud keywords as seed material for the prompt.
        if sentiment == "negative":
            for word in wordcloud.words_:
                text_for_llama += str(word) + " "

    # Word cloud over every comment regardless of sentiment (tokens > 3 chars).
    all_text = ' '.join(w for w in ' '.join(Instructor_comment).split() if len(w) > 3)
    st.write("### Word Cloud for All Sentiments")
    wordcloud = WordCloud(font_path=font_path, width=800, height=800,
                          background_color='white', max_words=200).generate(all_text)
    plt.figure(figsize=(8, 8), facecolor=None)
    plt.imshow(wordcloud)
    plt.axis("off")
    st.pyplot()

    # --- Prompt construction and generation -----------------------------------
    keywords = ", ".join(text_for_llama.split())
    prompt = f"""
    Based from these students' feedback: {keywords}. \n
    Please generate a recommendation to the instructor. Make it in sentence type and in English language only.
    Then give insights about the evaluation report based from different criteria.
    Here is the results: {criteria_results}
    Your response format-
    Recommendation to Instructor:
    Insights on Evaluation Report:

    """

    # (Removed: dead commented-out replicate.com example that embedded a
    # hardcoded API token — rotate that credential.)
    st.title('Recommendation:')
    with st.spinner("Generating Recommendation"):
        st.write(llm_chain.run(prompt))
        st.success("Generation Complete!")
diff --git a/app5_selectbox/evaluation_analysis.py b/app5_selectbox/evaluation_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..82a3a4fa0ec1673da6d31173b5d3a40324c93a78
--- /dev/null
+++ b/app5_selectbox/evaluation_analysis.py
@@ -0,0 +1,347 @@
+import gspread
+import pandas as pd
+from oauth2client.service_account import ServiceAccountCredentials
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+import torch
+import re
+import streamlit as st
+import hydralit_components as hc
+from matplotlib import pyplot as plt
+import numpy as np
+from wordcloud import WordCloud
+import plotly.graph_objs as go
+import plotly.express as px
+import plotly.figure_factory as ff
+from PIL import ImageFont
+# from app5_selectbox.langchain_llama_gpu import llm_chain
+from app5_selectbox.g4f_prompt import g4f_prompt
+# from app5_selectbox.llama2_prompt import llama_prompt
+from app5_selectbox.naive_bayes_cl import nb_clf
+
+from HF_inference import analyze_sintement
+
# Display names for the selectable sentiment models; the index into this list
# is what `eval_analysis(..., selected_model)` receives (0-2 map to the local
# transformer checkpoints below, 3 selects the Naive Bayes classifier).
models = ['BERT-BASE MODEL', 'BERT-LARGE MODEL', 'DISTILIBERT MODEL', 'NAIVE BAYES MODEL']


# old path
# model_list = [
#     r"/home/aibo/prototype_v1/BERT_BASE/bert_sentiment_model",
#     r"/home/aibo/prototype_v1/BERT_LARGE/bert_sentiment_model",
#     r"/home/aibo/prototype_v1/DISTILIBERT/bert_sentiment_model"
# ]

# new path
# Local checkpoint directories, parallel to `models[0:3]`.
# NOTE(review): machine-specific absolute paths — consider making these
# configurable (env var / config file) so the app runs outside this host.
model_list = [
    r"/home/aibo/prototype_v1/HF_MODELS/HUB/stud-fac-eval-bert-base-uncased",
    r"/home/aibo/prototype_v1/HF_MODELS/HUB/stud-fac-eval-bert-large-uncased",
    r"/home/aibo/prototype_v1/HF_MODELS/HUB/stud-fac-eval-distilbert-base-uncased",
]

# Hugging Face tokenizer names, parallel to `model_list` (currently unused in
# eval_analysis, which loads the tokenizer from the checkpoint dir instead).
model_tokenizer_list = ['bert-base-uncased', 'bert-large-uncased', 'distilbert-base-uncased']

# Default model index (0 == BERT-BASE) and generation-backend flag.
selected_model = 0
llama2_g4f = False # true == llama2

# if 'chkbx_selected_model' in st.session_state:
#     st.write("selected model: ",models.index(st.session_state.chkbx_selected_model))
# if 'chkbx_selected_model' not in st.session_state:
#     st.write("no selected!")
+
+
def clean_text(text_list):
    """Normalize a list of raw text samples for sentiment classification.

    Each sample is coerced to ``str``, lowercased, and stripped of every
    character that is not an ASCII letter or whitespace.

    Returns a new list of cleaned strings, in the original order.
    """
    return [re.sub(r'[^a-zA-Z\s]', '', str(sample).lower()) for sample in text_list]
+
+
+# local model
+def classify_sentiments(text_samples, tokenizer, model):
+ instructor_comments = []
+ predicted_sentiments = []
+ predicted_sentiments_scores = []
+
+ # Iterate through the text samples and classify the sentiment
+ for idx, text_sample in enumerate(text_samples):
+ # Tokenize the text sample
+ inputs = tokenizer(text_sample, return_tensors="pt")
+
+ # Perform sentiment classification
+ outputs = model(**inputs)
+
+ # Get the predicted sentiment (positive/negative)
+ predicted_class = torch.argmax(outputs.logits, dim=1).item()
+
+ # Get the probabilities for each class
+ probabilities = torch.softmax(outputs.logits, dim=1).tolist()[0]
+
+ # Store results
+ instructor_comments.append(text_sample)
+ predicted_sentiments.append("positive" if predicted_class == 1 else "negative")
+ predicted_sentiments_scores.append({"positive": probabilities[1]*100, "negative": probabilities[0]*100})
+ return instructor_comments, predicted_sentiments, predicted_sentiments_scores
+
+
+# # inference
+# def classify_sentiments(text_samples, model):
+# instructor_comments = []
+# predicted_sentiments = []
+# predicted_sentiments_scores = []
+
+# # text = ["i love this", "nice one!", "happy!"]
+# selected_model = model
+# results = [analyze_sintement(t, selected_model) for t in text_samples]
+
+
+
+# for idx, result in enumerate(results):
+# # st.text(result[0])
+# # predicted_class, probabilities = analyze_sintement(text_sample, model)
+# # Store results
+# instructor_comments.append(text_samples[idx])
+# predicted_sentiments.append("positive" if result[0] == "LABEL_1" else "negative")
+# predicted_sentiments_scores.append({"positive": result[1]*100, "negative": 100-(result[1]*100)})
+
+# # st.write(instructor_comments)
+# return instructor_comments, predicted_sentiments, predicted_sentiments_scores
+
+
+
def calculate_average_scores(probability_list):
    """Average the per-comment sentiment percentages.

    Args:
        probability_list: list of dicts, each with 'positive' and 'negative'
            percentage scores (as produced by classify_sentiments / nb_clf).

    Returns:
        (average_positive, average_negative). For an empty list returns
        (0.0, 0.0) instead of raising ZeroDivisionError — previously this
        crashed when an instructor had no comments.
    """
    if not probability_list:
        return 0.0, 0.0

    total_comments = len(probability_list)
    average_positive = sum(d['positive'] for d in probability_list) / total_comments
    average_negative = sum(d['negative'] for d in probability_list) / total_comments
    return average_positive, average_negative
+
+def eval_analysis(instructor, instructor_comment, criteria_results, selected_model):
+ if selected_model < 3:
+ model = model_list[selected_model]
+ # model_tokenizer = model_tokenizer_list[selected_model]
+ model_tokenizer = model_list[selected_model]
+ loaded_model = AutoModelForSequenceClassification.from_pretrained(model)
+ tokenizer = AutoTokenizer.from_pretrained(model_tokenizer)
+
+ clean_instructor_comment = clean_text(instructor_comment)
+
+ predicted_sentiments_transformer = classify_sentiments(clean_instructor_comment, tokenizer, loaded_model) # local model
+ # predicted_sentiments_transformer = classify_sentiments(clean_instructor_comment, models[selected_model]) # inference
+
+ predicted_sentiments = predicted_sentiments_transformer[1]
+ scores = predicted_sentiments_transformer[2]
+
+
+ elif selected_model == 3:
+ try:
+ instructor_comment, predicted_sentiments, scores = nb_clf(instructor_comment)
+ # scores = scores[1]
+ except Exception as e:
+ st.exception(e)
+ else: pass
+
+ sample_predictions = []
+ comments_data = []
+ negative_count = 0
+ neutral_count = 0
+ positive_count = 0
+ # average_sintement_score = np.average(scores['positive'])
+
+ average_positive, average_negative = calculate_average_scores(scores)
+
+ # st.text(calculate_average_scores(scores))
+ for text, prediction, score in zip(instructor_comment, predicted_sentiments, scores):
+ sample_predictions.append(prediction)
+ comments_data.append((text, prediction, score['positive']))
+ if prediction == "negative":
+ negative_count += 1
+ elif prediction == "neutral":
+ neutral_count += 1
+ else:
+ positive_count += 1
+
+ sentiment_texts = {
+ 'positive': [],
+ 'negative': []
+ }
+
+ for text, sentiment in zip(instructor_comment, sample_predictions):
+ sentiment_texts[sentiment].append(text)
+
+ text_for_llama = ""
+
+
+ def sentiment_tbl():
+ # Create DataFrame
+ comments_df = pd.DataFrame(instructor_comment, columns=["Comments"])
+
+ # Drop index
+ comments_df_display = comments_df.copy()
+ comments_df_display.reset_index(drop=True, inplace=True)
+
+ # Create DataFrame
+ comments_data_df = pd.DataFrame(comments_data, columns=["Comments", "Sentiment", "Score"])
+ # Define a function to apply row-wise styling
+ def highlight_row(row):
+ if row["Sentiment"] == "positive":
+ return ['background-color: lightgreen'] * len(row)
+ elif row["Sentiment"] == "negative":
+ return ['background-color: lightcoral'] * len(row)
+ else:
+ return [''] * len(row)
+
+ # Set index to start at 1
+ comments_data_df.index += 1
+
+ # Apply styling
+ styled_df = comments_data_df.style.apply(highlight_row, axis=1)
+
+ # Display styled DataFrame
+ st.table(styled_df)
+
+
+ theme_bad = {'bgcolor': '#FFF0F0','title_color': 'red','content_color': 'red','icon_color': 'red', 'icon': 'fa fa-times-circle'}
+ theme_good = {'bgcolor': '#EFF8F7','title_color': 'green','content_color': 'green','icon_color': 'green', 'icon': 'fa fa-check-circle'}
+
+ st.write(f"### SENTIMENTS/RECOMENDATION INSIGHTS")
+ with st.expander("Sentiment Analysis"):
+ st.title("Sentiment Analysis Dashboard")
+ st.write(f"## Using {models[selected_model]}")
+ st.write("### Sentiment Rating")
+
+ cc = st.columns(2)
+ with cc[0]:
+ # can just use 'good', 'bad', 'neutral' sentiment to auto color the card
+ hc.info_card(title='Positive', content=str(round(average_positive,6))+ '%', sentiment='good', bar_value=round(average_positive,6))
+ with cc[1]:
+ hc.info_card(title='Negative', content=str(round(average_negative,6))+ '%', sentiment='bad', bar_value=round(average_negative,6))
+
+ # st.write(f"#### Positive: {positive_count} - {round(average_positive,6)} %")
+ # st.write(f"#### Negative: {negative_count} - {round(average_negative,6)} %")
+
+ # st.write("### Sentiment Rating")
+ # st.write(f"#### Positive: {round(average_positive*100,2)} %")
+ # st.write(f"#### Negative: {round(average_negative*100,2)} %")
+
+
+ sentiment_counts = pd.Series(sample_predictions).value_counts()
+ desired_order = ['positive', 'negative']
+ sentiment_counts = sentiment_counts.reindex(desired_order, fill_value=0)
+ percentage_distribution = sentiment_counts / len(sample_predictions) * 100
+
+ sentiment_tbl()
+
+ st.write("### Sentiment Distribution")
+
+ fig = go.Figure(layout=dict(
+ autosize=True, # Set autosize to True for automatic adjustment
+ ))
+ fig.add_trace(go.Bar(
+ x=percentage_distribution.index,
+ y=sentiment_counts.values,
+ marker_color=['green', 'red'],
+ text=[f'{percentage:.2f}% {des_order.upper()}' for percentage, des_order in zip(percentage_distribution, desired_order)],
+ textposition='auto'
+ ))
+
+ fig.update_layout(
+ width=600,
+ height=500,
+ xaxis=dict(title='Sentiment', tickangle=45),
+ yaxis=dict(title='Count'),
+ title='Sentiment Distribution in Sample Predictions',
+ )
+
+ st.plotly_chart(fig)
+
+ for sentiment, texts in sentiment_texts.items():
+ combined_texts = ' '.join(texts)
+ combined_texts = combined_texts.split()
+ filtered_words = [word for word in combined_texts if len(word) > 2]
+ combined_texts = ' '.join(filtered_words)
+
+ if combined_texts == "":
+ continue
+
+ font_path = "/home/aibo/prototype_v1/prototype/app5_selectbox/QuartzoBold-W9lv.ttf"
+ wordcloud = WordCloud(font_path=font_path, width=800, height=600, background_color='white', max_words=15, min_word_length=3, stopwords={}).generate(combined_texts)
+
+ st.write(f"### Word Cloud for {sentiment.capitalize()} Sentiment")
+
+ plt.figure(figsize=(10, 6))
+ plt.imshow(wordcloud, interpolation='bilinear')
+ plt.axis("off")
+ wordcloud_fig = plt.gcf()
+ st.pyplot(wordcloud_fig)
+
+ if sentiment == "negative":
+ text_for_llama = sentiment_texts[sentiment]
+
+
+ # Generate a word cloud from all the text data
+ all_text = ' '.join(instructor_comment)
+ all_text = all_text.split()
+ filtered_words = [word for word in all_text if len(word) > 2]
+ all_text = ' '.join(filtered_words)
+ st.write("### Word Cloud for All Sentiments")
+ wordcloud = WordCloud(font_path=font_path, width=800, height=800, background_color='white', max_words=200, min_word_length=3, stopwords={}).generate(all_text)
+ # Create a Matplotlib figure
+ plt.figure(figsize=(8, 8))
+ plt.imshow(wordcloud, interpolation='bilinear')
+ plt.axis("off")
+ wordcloud_fig = plt.gcf()
+ st.pyplot(wordcloud_fig)
+
+ if text_for_llama == "":
+ with st.expander("Expressing Gratitude and Dedication"):
+ st.title("Expressing Gratitude and Dedication")
+ text_for_llama = f"""
+ There's no negative feedback or comment for the instructor; give him or her a short letter to say.
+ [Your Name] = The Management
+ [Instructor's Name] = {instructor}
+ """
+ prompt = text_for_llama
+ while True:
+ try:
+ with st.spinner("Generating...."):
+ if not llama2_g4f: st.write(g4f_prompt(prompt)) #################
+ # else: st.write(llama_prompt(prompt)) #################
+ st.success("Generation Complete!")
+ break
+ except Exception as e:
+ pass
+ else:
+ with st.expander("Recommendation"):
+ # st.title('Recommendation:')
+ # text_for_llama = text_for_llama.split()
+ text_for_llama = ", ".join(text_for_llama)
+ text_for_llama = f"""
+ Based on these students' feedback: {str(text_for_llama)}. \n
+ Please generate a short letter to the instructor with ten recommendations in bullet format. Make it in sentence type and English only.
+ Define the best letter's subject based on the recommendation.
+ Subject is Recommendations for Effective Teaching
+ Sender's Name is 'The Management'
+ receiver's or Instructor's Name is {instructor}
+
+ """
+ prompt = text_for_llama
+ while True:
+ try:
+ with st.spinner("Generating...."):
+ if not llama2_g4f: st.write(g4f_prompt(prompt)) #################
+ # else: st.write(llama_prompt(prompt)) #################
+ st.success("Generation Complete!")
+ break
+ except Exception as e:
+ pass
\ No newline at end of file
diff --git a/app5_selectbox/evaluation_analysis_g4f.py b/app5_selectbox/evaluation_analysis_g4f.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dcb582c0c31a4fd55086181c02b4721deaf6ff2
--- /dev/null
+++ b/app5_selectbox/evaluation_analysis_g4f.py
@@ -0,0 +1,382 @@
+import gspread
+import pandas as pd
+from oauth2client.service_account import ServiceAccountCredentials
+from transformers import BertForSequenceClassification, BertTokenizer
+import torch
+import streamlit as st
+from matplotlib import pyplot as plt
+import numpy as np
+from wordcloud import WordCloud
+from PIL import ImageFont
+# from app5_selectbox.langchain_llama_gpu import llm_chain
+from app5_selectbox.g4f_prompt import g4f_prompt
+from app5_selectbox.df4_sentiment_analysis import sentiment_func
+
+# # Load the model and tokenizer
+# model = BertForSequenceClassification.from_pretrained("./sentiment_model")
+# tokenizer = BertTokenizer.from_pretrained("./sentiment_model")
+
def eval_analysis(Instructor, Instructor_comment, criteria_results):
    """Render a Streamlit sentiment-analysis dashboard for one instructor.

    Classifies each student comment, shows counts / a distribution bar chart /
    word clouds, then builds a prompt from the negative feedback and retries an
    LLM call until it succeeds.

    Parameters (types inferred from usage — confirm at the call site):
        Instructor: instructor name; interpolated into the LLM prompt text.
        Instructor_comment: iterable of comment strings (joined, tokenized,
            and fed to the sentiment model).
        criteria_results: per-criteria evaluation scores; only referenced from
            commented-out prompt variants below, currently unused.

    Side effects: loads a local BERT checkpoint, downloads the
    'bert-base-uncased' tokenizer, prints to stdout, and writes widgets to the
    Streamlit page. Returns None.
    """
    # # Authenticate with Google Sheets API
    # scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
    # creds = ServiceAccountCredentials.from_json_keyfile_name('dataset-401003-7325e98039a4.json', scope)
    # client = gspread.authorize(creds)

    # # Open the spreadsheet by its title
    # spreadsheet = client.open('survey (Responses)')

    # # Select a specific worksheet
    # worksheet = spreadsheet.worksheet('Form Responses 1')

    # # Read data from the worksheet
    # data = worksheet.get_all_values()

    # # Create a Pandas DataFrame from the data
    # df = pd.DataFrame(data[1:], columns=data[0]) # Assuming the first row contains column headers
    # df = df.iloc[:, [1, 2]] # Filter columns

    # #
    # instructor_list = df.iloc[:, 0].unique()
    # instructor_list = sorted(instructor_list)
    # # print(instructor_list)

    # # Create a dropdown widget in the sidebar
    # option = st.sidebar.selectbox("Select an option", instructor_list)

    # # Filter rows containing "Instructor 1"
    # Instructor = df[df['Instructor'] == option]
    # Instructor_comment = Instructor['comment'].tolist()
    # ##################################################### BERT MODEL
    # def perform_sentiment_analysis(text):
    #     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    #     with torch.no_grad():
    #         outputs = model(**inputs)
    #     logits = outputs.logits
    #     predicted_class = torch.argmax(logits, dim=1).item()
    #     sentiment_labels = ["negative", "neutral", "positive"]
    #     sentiment = sentiment_labels[predicted_class]
    #     return sentiment


    # from transformers import BertForSequenceClassification, BertTokenizer

    # # Load the model and tokenizer
    # model = BertForSequenceClassification.from_pretrained("./sentiment_model")
    # tokenizer = BertTokenizer.from_pretrained("./sentiment_model")

    # # sample_texts_tfidf = vectorizer.transform(sample_texts)
    # # sample_predictions = classifier.predict(sample_texts_tfidf)

    # sample_predictions = []

    # # Initialize counters for sentiment classes
    # negative_count = 0
    # neutral_count = 0
    # positive_count = 0


    # for text in Instructor_comment:
    #     predicted_class = perform_sentiment_analysis(text)
    #     print(f"Text: {text}")
    #     print(f"Predicted Sentiment: {predicted_class}")
    #     sample_predictions.append(predicted_class)
    #     if predicted_class == "negative":
    #         negative_count += 1
    #     elif predicted_class == "neutral":
    #         neutral_count += 1
    #     else:
    #         positive_count += 1

    # print(f'negative_count {negative_count}')
    # print(f'neutral_count {neutral_count}')
    # print(f'positive_count {positive_count}')

    ################################################### scikit learn model

    # import joblib
    # # Load the model and vectorizer for predictions
    # loaded_model, loaded_vectorizer = joblib.load("MultinomialNB_Sentiment.pkl")

    # # Transform the new text data using the loaded vectorizer
    # new_text_features = loaded_vectorizer.transform(Instructor_comment)

    # # Make predictions using the loaded model
    # predicted_class = loaded_model.predict(new_text_features)
    # # print(f"Predicted class: {predicted_class}")

    # sample_predictions = []

    # # Initialize counters for sentiment classes
    # negative_count = 0
    # neutral_count = 0
    # positive_count = 0


    # for text, prediction in zip(Instructor_comment, predicted_class):
    #     print(f"Text: {text}")
    #     print(f"Predicted Sentiment: {prediction}")
    #     sample_predictions.append(prediction)
    #     if prediction == "negative":
    #         negative_count += 1
    #     elif prediction == "neutral":
    #         neutral_count += 1
    #     else:
    #         positive_count += 1

    # print(f'negative_count {negative_count}')
    # print(f'neutral_count {neutral_count}')
    # print(f'positive_count {positive_count}')

    ################################################### bert2 model
    # NOTE(review): torch and numpy are already imported at module level;
    # these function-local imports shadow them redundantly.
    import torch
    from transformers import BertTokenizer, BertForSequenceClassification
    import numpy as np

    # Load the saved model checkpoint from the local 'sentiment_model' directory.
    loaded_model = BertForSequenceClassification.from_pretrained('sentiment_model')
    tokenizerr = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)


    # Encode the sample comments (truncated/padded to 128 tokens).
    sample_encodings = tokenizerr(list(Instructor_comment), truncation=True, padding=True, max_length=128, return_tensors='pt')

    # Make predictions on the sample comments
    sample_input_ids = sample_encodings['input_ids']
    sample_attention_mask = sample_encodings['attention_mask']

    with torch.no_grad():
        sample_outputs = loaded_model(sample_input_ids, attention_mask=sample_attention_mask)

    # Get predicted labels.
    # NOTE(review): np.argmax here is applied to a torch tensor of logits;
    # the result is discarded below when sample_predictions is reset to [],
    # so this local BERT pass currently has no effect on the dashboard.
    sample_logits = sample_outputs.logits
    sample_predictions = np.argmax(sample_logits, axis=1)


    # Map predicted labels back to sentiment labels
    sentiment_labels = ['negative', 'positive']
    # predicted_sentiments = [sentiment_labels[label] for label in sample_predictions]

    # The sentiments actually used come from the project helper sentiment_func,
    # which is expected to yield one label string per comment (e.g. "positive"
    # / "negative") — confirm against df4_sentiment_analysis.
    predicted_sentiments = sentiment_func(Instructor_comment)
    print(predicted_sentiments)

    # # Print the comments and predicted sentiments
    # for comment, sentiment in zip(Instructor_comment, predicted_sentiments):
    #     print(f"Comment: {comment}")
    #     print(f"Predicted Sentiment: {sentiment}")
    #     print()

    # Reset: from here on sample_predictions holds label strings, not indices.
    sample_predictions = []

    # Initialize counters for sentiment classes
    negative_count = 0
    neutral_count = 0
    positive_count = 0

    # print(predicted_sentiments)
    # print(Instructor_comment)

    # Tally label counts while echoing each prediction to stdout.
    for text, prediction in zip(Instructor_comment, predicted_sentiments):
        print(f"Text: {text}")
        print(f"Predicted Sentiment: {prediction}")
        sample_predictions.append(prediction)
        if prediction == "negative":
            negative_count += 1
        elif prediction == "neutral":
            neutral_count += 1
        else:
            positive_count += 1

    print(f'negative_count {negative_count}')
    # print(f'neutral_count {neutral_count}')
    print(f'positive_count {positive_count}')

    ###################################################

    # Create a Streamlit app
    st.title("Sentiment Analysis Dashboard")
    st.sidebar.header("Settings")

    # Sidebar link to the survey form.
    link_text = "Instructor Survey"
    link_url = "https://forms.gle/64n9CXMDRP2NYgZYA"
    st.sidebar.markdown(f"[{link_text}]({link_url})")


    # Display sentiment counts
    st.write("### Sentiment Counts")
    st.write(f"Negative: {negative_count}")
    # st.write(f"Neutral: {neutral_count}")
    st.write(f"Positive: {positive_count}")

    # Plot sentiment distribution (counts on the y-axis, percentages as labels).
    sentiment_counts = pd.Series(np.array(sample_predictions)).value_counts()
    desired_order = ['positive',
                     # 'neutral',
                     'negative']
    sentiment_counts = sentiment_counts.reindex(desired_order, fill_value=0)
    percentage_distribution = sentiment_counts / len(sample_predictions) * 100

    st.write("### Sentiment Distribution")
    fig, ax = plt.subplots(figsize=(8, 6))
    bars = plt.bar(percentage_distribution.index, sentiment_counts.values, color=['green', 'orange', 'red'])
    plt.xlabel('Sentiment')
    plt.ylabel('Count')
    plt.title('Sentiment Distribution in Sample Predictions')
    plt.xticks(rotation=45)
    # Annotate each bar with its percentage and label.
    for bar, percentage, des_order in zip(bars, percentage_distribution, desired_order):
        height = bar.get_height()
        ax.text(bar.get_x() + bar.get_width() / 2, height, f'{percentage:.2f}% {des_order.upper()}', ha='center', va='bottom')
    st.pyplot(fig)

    st.set_option('deprecation.showPyplotGlobalUse', False)

    # Generate word clouds based on sentiment categories
    sentiment_texts = {
        'positive': [],
        # 'neutral': [],
        'negative': []
    }

    # Bucket each comment under its predicted label.
    # NOTE(review): raises KeyError if sentiment_func ever yields "neutral",
    # since that key is commented out above — confirm the label set.
    for text, sentiment in zip(Instructor_comment, sample_predictions):
        sentiment_texts[sentiment].append(text)

    text_for_llama = ""

    for sentiment, texts in sentiment_texts.items():
        # Keep only words longer than 2 characters for the cloud.
        combined_texts = ' '.join(texts)
        combined_texts = combined_texts.split()
        filtered_words = [word for word in combined_texts if len(word) > 2]
        combined_texts = ' '.join(filtered_words)
        if combined_texts =="": continue
        # Load your custom TrueType font using PIL
        # NOTE(review): font_path is only bound inside this loop; if every
        # sentiment bucket is empty, the "all sentiments" cloud below hits a
        # NameError — consider hoisting this assignment.
        font_path = "QuartzoBold-W9lv.ttf" # Replace with the path to your TTF font file
        # custom_font = ImageFont.truetyp e(font_path) # Adjust the font size as needed
        # Set the font family to use the TrueType font
        # font = ImageFont.truetype(font_path)

        wordcloud = WordCloud(font_path=font_path,width=800, height=600, background_color='white', max_words=15).generate(combined_texts)
        st.write(f"### Word Cloud for {sentiment} Sentiment")
        plt.figure(figsize=(10, 6))
        plt.imshow(wordcloud, interpolation='bilinear')
        plt.axis('off')
        st.pyplot()

        if sentiment == "negative":
            # Extract the text from the word cloud object
            generated_text = wordcloud.words_

            # Collect the cloud's top negative words as the LLM prompt seed.
            for word, frequency in generated_text.items():
                # print(f"{word}: {frequency}")
                text_for_llama += str(word)+" "


    # Generate a word cloud from all the text data
    all_text = ' '.join(Instructor_comment)
    all_text = all_text.split()
    # NOTE(review): this pass filters len > 3 while the per-sentiment pass
    # above filters len > 2 — presumably intentional, but verify.
    filtered_words = [word for word in all_text if len(word) > 3]
    all_text = ' '.join(filtered_words)

    st.write("### Word Cloud for All Sentiments")
    wordcloud = WordCloud(font_path=font_path, width=800, height=800, background_color='white', max_words=200).generate(all_text)
    plt.figure(figsize=(8, 8), facecolor=None)
    plt.imshow(wordcloud)
    plt.axis("off")
    st.pyplot()

    # Partition the raw comments; neg_comments/pos_comments are currently
    # unused beyond this point.
    neg_comments = []
    pos_comments = []
    # Print the comments and predicted sentiments
    for comment, sentiment in zip(Instructor_comment, predicted_sentiments):
        if sentiment == "positive": pos_comments.append(comment)
        else: neg_comments.append(comment)


    # No negative words collected → thank-you letter prompt; otherwise a
    # recommendations prompt seeded with the negative word-cloud terms.
    if text_for_llama == "":
        st.title("Expressing Gratitude and Dedication")
        text_for_llama = f"""
        There's no negative feedback/comments to the instructor, give him/her short email to say.
        [Your Name] = The Management
        [Instructor's Name] = {Instructor}
        """
    else:
        st.title('Recommendation:')
        text_for_llama = text_for_llama.split()
        text_for_llama = ", ".join(text_for_llama)
        text_for_llama = f"""
        Based from these students' feedback: {str(text_for_llama)}. \n
        Please generate a short email to teh instructor having 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only.
        define the best email subject based from the recomendation
        [Your Name] = The Management
        [Instructor's Name] = {Instructor}

        """

    # text_for_llama = f"""
    # Based from these students' feedback: {str(text_for_llama)}. \n
    # Please generate a short 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only.

    # """

    # text_for_llama = f"""
    # Based from these students' feedback: {str(text_for_llama)}. \n
    # and Overall score per criteria results: {str(criteria_results)}. \n
    # Please generate a short 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only.
    # """
    # Then give insights about the evaluation report based from different criteria.
    # Here is the results: {criteria_results}
    # Your response format-
    # Recommendation to Instructor:
    # Insights on Evaluation Report:



    prompt = text_for_llama



    # # ================================================ replicate.com
    # CUDA_LAUNCH_BLOCKING=1
    # import replicate
    # replicate = replicate.Client(api_token='r8_M9Dx8VYKkuTcw1o39d4Yw0HtpWFt4k239ebvW')
    # output = replicate.run(
    #     # "meta/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
    #     "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
    #     input={"prompt": prompt}
    # )
    # st.write(output)
    # # The meta/llama-2-70b-chat model can stream output as it's running.
    # # The predict method returns an iterator, and you can iterate over that output.
    # # ================================================


    # # st.title('Recommendation:')
    # # llama_output = ""
    # # with st.spinner("Generating Recommendation"):
    # #     loading_text = st.empty()
    # #     for item in reponse(prompt):
    # #         llama_output +=item
    # #         loading_text.write(llama_output)
    # # st.success("Generation Complete!")

    # # ================================================ local llama llm_chain
    # Retry until generation succeeds.
    # NOTE(review): both generation calls are commented out, so this loop
    # currently just reports success; and the broad `except ... pass` with no
    # sleep would spin forever if a call persistently raised — consider a
    # retry cap and logging the exception.
    while True:
        try:
            with st.spinner("Generating...."):
                # st.write(llm_chain.run(prompt))
                # st.write(g4f_prompt(prompt)) #################
                st.success("Generation Complete!")
            break

        except Exception as e:
            # Handle the error (e.g., log it or take appropriate action)
            # Sleep for a moment before retrying
            # st.write("Error occurred.. Retrying")
            pass
            # time.sleep(0.4)
    # # ================================================
diff --git a/app5_selectbox/evaluation_fac.py b/app5_selectbox/evaluation_fac.py
new file mode 100644
index 0000000000000000000000000000000000000000..d18c56cca80796ca5def11f775d702ff802caf52
--- /dev/null
+++ b/app5_selectbox/evaluation_fac.py
@@ -0,0 +1,528 @@
+import streamlit as st
+import pandas as pd
+import plotly.graph_objs as go
+import time
+import plotly.express as px
+import ast
+import numpy as np
+
+from app5_selectbox.database_con import cursor, db_connection
+from app5_selectbox.app5_selectbox_func import generate_unique_4
+from app5_selectbox.evaluation_analysis import eval_analysis
+# from app5_selectbox.evaluation_analysis_g4f import eval_analysis
+
+# from app5_selectbox.langchain_llama_gpu import llm_chain
+from app5_selectbox.g4f_prompt import g4f_prompt
+
+
+# st.title("Student-Faculty Evaluation")
+
+
+
+
+
+# st.write(st.session_state.student_id)
+# Function to fetch evaluation data
def fetch_evaluation_data():
    """Read every row of the `evaluation` table into a DataFrame.

    Returns the populated DataFrame, or None (after surfacing a Streamlit
    warning) when the table holds no rows. Uses the module-level DB cursor.
    """
    cursor.execute("SELECT * FROM evaluation")
    rows = cursor.fetchall()
    if rows:
        headers = [col[0] for col in cursor.description]
        return pd.DataFrame(rows, columns=headers)
    st.warning("No evaluation data found.")
    return None
+
+# Function to analyze instructors
+def analyze_instructors(evaluation_df):
+ if evaluation_df is None:
+ return
+
+ column_names = evaluation_df.columns[4:14]
+ criteria_labels = [column.replace("_", " ") for column in column_names]
+
+ cursor.execute("SELECT * FROM instructor")
+ instructor_data = cursor.fetchall()
+
+ instructor_df = pd.DataFrame(instructor_data, columns=["inst_id", "instructor name","program code", "user name", "password"])
+ instructor_avg_scores = evaluation_df.groupby("inst_id")[column_names].mean().reset_index()
+ instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst_id", how="left")
+
+
+ # st.write(instructor_avg_scores)
+ # programs_list = sorted(instructor_avg_scores["program code"].unique())
+
+ # # Fetch program options from the program table
+ # cursor.execute("SELECT prog_id, prog_code, prog_name FROM program")
+ # selected_program = pd.DataFrame(cursor.fetchall(), columns=["prog_id", "prog_code", "prog_name"])
+ # st.write(selected_program)
+ # # st.write(list({str(prog): prog[0] for prog in program_options}))
+ # selected_program_select = st.selectbox("Select Program", selected_program["prog_code"])
+ # # selected_program = ast.literal_eval(str(selected_program))
+
+ # # selected_program = st.selectbox("Select Program", programs_list)
+ # filtered_instructor_list = pd.DataFrame(instructor_avg_scores)
+ # # st.write(filtered_instructor_list)
+ # mask = filtered_instructor_list["program code"] == selected_program.loc[selected_program['prog_code'] == selected_program_select, 'prog_id'].values[0]
+ # # st.write(mask)
+ # filtered_instructor_list = filtered_instructor_list.loc[mask]
+
+ # # st.write(filtered_instructor_list)
+ # instructors_list = sorted(filtered_instructor_list["instructor name"].unique())
+ # # print(type(instructor_avg_scores))
+
+ # instructors_list = instructor_avg_scores.query("program code == {selected_program}")
+ # st.write(len(instructors_list)) # df to graph
+
+ # selected_instructor = st.selectbox("Select Instructor", instructors_list)
+ selected_instructor = st.session_state.inst_name
+
+ try:
+ filtered_data = evaluation_df[evaluation_df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor name"] == selected_instructor]["inst_id"].values[0]]
+ selected_instructor_comments = list(filtered_data["comments"])
+ st.write(f"## Welcome! {selected_instructor}")
+ st.subheader(f"You are Evaluated by: {len(selected_instructor_comments)} students")
+ except:
+ st.info("### No Existing Evaluation Found!",icon="❗")
+
+
+ models = ['BERT-BASE MODEL', 'BERT-LARGE MODEL', 'DISTILIBERT MODEL', 'NAIVE BAYES MODEL']
+ with st.sidebar.expander("Settings"):
+ # enable_analyze_graph = st.checkbox("Analyze graph by LLM", value=False)
+ global enable_llm_analyze_sintement, sentiment_model, sentiment_model_index
+ enable_llm_analyze_sintement = st.checkbox("Enable LLM (LLAMA)", value=False)
+ if enable_llm_analyze_sintement:
+ sentiment_model = st.selectbox("Select Model for Sentiment Analysis:", models)
+ sentiment_model_index = models.index(sentiment_model)
+ if st.button("Log Out", type="primary", use_container_width=True):
+ st.session_state.pop("logged_in", None)
+ st.session_state.pop("inst_id", None)
+ st.session_state.pop("inst_name", None)
+ st.session_state.pop("prog_id", None)
+ st.session_state.pop("user_type", None)
+ st.experimental_rerun()
+ st.button("Refresh", use_container_width=True)
+
+
+ cursor.execute("""
+ SELECT subj_inst.subj_inst_id, subject.sub_name
+ FROM subj_inst
+ INNER JOIN subject
+ ON subj_inst.sub_id_code = subject.sub_id_code
+ """)
+
+ # Assuming you have a DataFrame named 'filtered_data'
+ # and column_names is a list of column names you want to consider for calculating average scores
+
+ # Convert all columns to numeric data
+ filtered_data[column_names] = filtered_data[column_names].apply(pd.to_numeric, errors='coerce')
+
+ # Fetch subject data from the cursor
+ subject_data = cursor.fetchall()
+
+ # Create a DataFrame for subject data
+ subject_df = pd.DataFrame(subject_data, columns=["subj_inst_id", "sub name"])
+
+ # Merge subject data with filtered data based on 'subj_inst_id'
+ filtered_data = filtered_data.merge(subject_df, on="subj_inst_id", how="left")
+
+ # Group by subject name and calculate average scores
+ subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()
+
+ # Calculate total average and add it as a new column
+ subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)
+
+ subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()
+ subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)
+
+
+
+
+ cursor.execute(f"""
+ SELECT
+ s.class_id,
+ pr.prog_code || '-' || c.class_year || '-' || c.class_section AS class_info,
+ COUNT(DISTINCT s.stud_id) AS num_respondents,
+ ROUND((AVG(Teaching_Effectiveness) + AVG(Course_Organization) + AVG(Accessibility_and_Communication) +
+ AVG(Assessment_and_Grading) + AVG(Respect_and_Inclusivity) + AVG(Engagement_and_Interactivity) +
+ AVG(Feedback_and_Improvement) + AVG(Accessibility_of_Learning_Resources) +
+ AVG(Passion_and_Enthusiasm) + AVG(Professionalism_and_Ethical_Conduct)) / 10, 2) AS avg_overall,
+ ROUND((COUNT(DISTINCT s.stud_id) * (AVG(Teaching_Effectiveness) + AVG(Course_Organization) + AVG(Accessibility_and_Communication) +
+ AVG(Assessment_and_Grading) + AVG(Respect_and_Inclusivity) + AVG(Engagement_and_Interactivity) +
+ AVG(Feedback_and_Improvement) + AVG(Accessibility_of_Learning_Resources) +
+ AVG(Passion_and_Enthusiasm) + AVG(Professionalism_and_Ethical_Conduct)) / 10), 2) AS weighted_avg_overall
+ FROM
+ evaluation e
+ JOIN
+ student s ON e.stud_id = s.stud_id
+ JOIN
+ class c ON s.class_id = c.class_id
+ JOIN
+ program pr ON c.prog_id = pr.prog_id
+ WHERE
+ s.stud_id IN {tuple(list(filtered_data["stud_id"]))}
+ GROUP BY
+ s.class_id, pr.prog_code, c.class_year, c.class_section, class_info;
+ """)
+
+ avg_scores_per_class = pd.DataFrame(cursor.fetchall(), columns=[
+ "class_id",
+ "class_info",
+ "num_respondents",
+ "avg_overall",
+ "weighted_avg_overall"
+ ])
+
+ # Calculate the last row's weighted_avg_overall / num_respondents
+ last_row_index = avg_scores_per_class["weighted_avg_overall"].last_valid_index()
+ if last_row_index is not None:
+ avg_scores_per_class.at[last_row_index, "weighted_avg_overall"] /= avg_scores_per_class.at[last_row_index, "num_respondents"]
+
+ # Convert the column to decimal.Decimal before rounding
+ avg_scores_per_class["weighted_avg_overall"] = avg_scores_per_class["num_respondents"] * avg_scores_per_class["avg_overall"] # avg_scores_per_class["weighted_avg_overall"].apply(lambda x: round(float(x), 2))
+
+ # Drop rows with None values
+ avg_scores_per_class = avg_scores_per_class.dropna()
+
+
+ # Calculate the overall averages for avg_overall and weighted_avg_overall
+ num_respondents = round(avg_scores_per_class["num_respondents"].sum(), 2)
+ overall_avg_overall = round(avg_scores_per_class["avg_overall"].mean(), 2)
+ overall_weighted_avg_overall = round(avg_scores_per_class["weighted_avg_overall"].sum(),2)
+ weighted_avg_overall = round(overall_weighted_avg_overall / num_respondents,2)
+
+ # # Append an additional row for avg_overall and weighted_avg_overall
+ # avg_scores_per_class = avg_scores_per_class.append({
+ # "class_id": int(avg_scores_per_class["class_id"].max()) + 1,
+ # "class_info": "Total",
+ # "num_respondents": avg_scores_per_class["num_respondents"].sum(),
+ # "avg_overall": round(overall_avg_overall, 2),
+ # "weighted_avg_overall": round(overall_weighted_avg_overall / avg_scores_per_class["num_respondents"].sum(), 2)
+ # }, ignore_index=True)
+
+ # st.write(avg_scores_per_class.style.set_properties(**{'text-align': 'center'}))
+
+
+
+ # Add summary rows to the DataFrame
+ avg_scores_per_class = avg_scores_per_class.append({
+ "class_id": "",
+ "class_info": "Summary",
+ "num_respondents": num_respondents,
+ "avg_overall": " ",
+ "weighted_avg_overall": overall_weighted_avg_overall
+ }, ignore_index=True)
+
+
+ def get_color(weighted_avg_overall):
+ satisfaction_level = calculate_satisfaction(weighted_avg_overall)
+ if satisfaction_level == "Outstanding":
+ return "rgb(171, 235, 198 )"
+ elif satisfaction_level == "Above Average":
+ return "rgb(218, 247, 166)"
+ elif satisfaction_level == "Average":
+ return "rgb(255, 195, 0)"
+ elif satisfaction_level == "Below Average":
+ return "rgb(255, 87, 51)"
+ else:
+ return "rgb(255, 87, 51)"
+
+ def calculate_satisfaction(weighted_avg_overall):
+ if weighted_avg_overall > 4:
+ return "Outstanding"
+ elif weighted_avg_overall > 3:
+ return "Above Average"
+ elif weighted_avg_overall > 2:
+ return "Average"
+ elif weighted_avg_overall > 1:
+ return "Below Average"
+ else:
+ return "Unsatisfactory"
+
+ def highlight_cell(col, col_label, row_label):
+ # check if col is a column we want to highlight
+ if col.name == col_label:
+ # a boolean mask where True represents a row we want to highlight
+ mask = (col.index == row_label)
+ # return an array of string styles (e.g. ["", "background-color: yellow"])
+ # return ["background-color: lightgreen" if val_bool else "" for val_bool in mask]
+ return [f"background-color: {get_color(weighted_avg_overall)}" if val_bool else "" for val_bool in mask]
+ else:
+ # return an array of empty strings that has the same size as col (e.g. ["",""])
+ return np.full_like(col, "", dtype="str")
+
+
+
+
+
+ avg_scores_per_class = avg_scores_per_class.append({
+ "class_id": "",
+ "class_info": "Weighted Avg.",
+ "num_respondents": " ", # You can set this to "N/A" or any appropriate value
+ "avg_overall": calculate_satisfaction(weighted_avg_overall), # You can set this to "N/A" or any appropriate value
+ "weighted_avg_overall": weighted_avg_overall
+ }, ignore_index=True)
+
+
+ # # st.dataframe(avg_scores_per_class.style.background_gradient(subset=["C"], cmap="RdYlGn", vmin=0, vmax=2.5))
+
+ last_row = avg_scores_per_class.index[-1]
+ # avg_scores_per_class =avg_scores_per_class.style.apply(highlight_cell, col_label="avg_overall", row_label=last_row)
+ # Assuming avg_scores_per_class is your DataFrame
+
+
+ # Rename columns
+ avg_scores_per_class.rename(columns={'class_id': 'CLASS ID',
+ 'class_info': 'SECTION',
+ 'num_respondents': 'NO. of RESPONDENTS',
+ 'avg_overall': 'AVERAGE',
+ 'weighted_avg_overall': 'WEIGHTED AVERAGE'}, inplace=True)
+
+ # Format numeric values to two decimal places
+ avg_scores_per_class = avg_scores_per_class.applymap(lambda x: '{:.2f}'.format(x) if isinstance(x, float) else x)
+
+ # Get the last row index
+ last_row = avg_scores_per_class.index[-1]
+
+ # Apply any specific styling
+ avg_scores_per_class = avg_scores_per_class.style.apply(highlight_cell, col_label="AVERAGE", row_label=last_row)
+
+ # Drop index column
+ avg_scores_per_class.hide_index()
+
+ # Render DataFrame without index column
+ # st.dataframe(avg_scores_per_class_no_index)
+
+ # avg_scores_per_class.style.apply(lambda x: ["background: red" if v > x.iloc[3] else "" for v in x], axis = 1)
+
+ # avg_scores_per_class = pd.DataFrame(avg_scores_per_class)
+ # avg_scores_per_class.set_index('CLASS ID', inplace=True)
+ # avg_scores_per_class.reset_index(drop=True, inplace=True)
+ # st.write(type(avg_scores_per_class))
+ # avg_scores_per_class.reset_index(drop=True, inplace=True)
+ # st.markdown(avg_scores_per_class.style.hide(axis="index").to_html(), unsafe_allow_html=True)
+ # avg_scores_per_class1 = avg_scores_per_class.style.hide()
+
+
+
+ # # Convert DataFrame to HTML without index column
+ # avg_scores_per_class_html = avg_scores_per_class.to_html(index=False)
+
+ # Use CSS to hide the index column
+ avg_scores_per_class_html = avg_scores_per_class.render()
+ avg_scores_per_class_html = avg_scores_per_class_html.replace('