diff --git a/HF_inference.cpython-310.pyc b/HF_inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dd045b2b3dd5036622c2843464da08834bf5269 Binary files /dev/null and b/HF_inference.cpython-310.pyc differ diff --git a/HF_inference.py b/HF_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..66b90ba1ed563913d108b58fd8f6a8e60c644d8c --- /dev/null +++ b/HF_inference.py @@ -0,0 +1,92 @@ +# import requests +# import time +# import streamlit as st +# import os + + +# # SECRET_TOKEN +# SECRET_TOKEN = os.getenv("HF_IBOA") + +# DISTILIBERT = "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-distilbert-base-uncased" +# BERTLARGE = "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-large-uncased" +# BERTBASE = "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-base-uncased" + +# headers = {"Authorization": SECRET_TOKEN} + +# # @st.cache_resource +# @st.cache_resource(experimental_allow_widgets=True, show_spinner=False) +# def query(payload, selected_model): +# if selected_model == "DISTILIBERT MODEL": +# API_URL = DISTILIBERT +# elif selected_model == "BERT-LARGE MODEL": +# API_URL = BERTLARGE +# elif selected_model == "BERT-BASE MODEL": +# API_URL = BERTBASE +# else: +# API_URL = DISTILIBERT + +# start_time = time.time() +# counter = 0 +# with st.spinner("Processing..."): +# while True: +# response = requests.post(API_URL, headers=headers, json=payload) +# # st.write(response) +# if response.status_code == 200: + +# return response.json() +# else: +# time.sleep(1) # Wait for 1 second before retrying + +# def analyze_sintement(text, selected_model): +# output = query({"inputs": text}, selected_model) +# if output: +# # st.success(f"Translation complete!") +# return output[0][0]['label'], output[0][0]['score'] +# else: +# st.warning("Error! 
Please try again.") + + + +import requests +import time +import streamlit as st +import os + +# Define constants for API URLs +MODEL_URLS = { + "DISTILIBERT MODEL": "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-distilbert-base-uncased", + "BERT-LARGE MODEL": "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-large-uncased", + "BERT-BASE MODEL": "https://api-inference.huggingface.co/models/MENG21/stud-fac-eval-bert-base-uncased" +} + +# SECRET_TOKEN +SECRET_TOKEN = os.getenv("HF_IBOA") + +# Set headers +headers = {"Authorization": SECRET_TOKEN} + +# Define retry parameters +MAX_RETRIES = 3 +RETRY_INTERVAL = 1 # in seconds + +@st.cache_resource(experimental_allow_widgets=True, show_spinner=False) +def query(payload, selected_model): + # st.write(selected_model) + API_URL = MODEL_URLS.get(selected_model, MODEL_URLS[selected_model]) # Get API URL based on selected model + + for retry in range(MAX_RETRIES): + response = requests.post(API_URL, headers=headers, json=payload) + if response.status_code == 200: + return response.json() + else: + time.sleep(RETRY_INTERVAL) + + return None + +def analyze_sintement(text, selected_model): + output = query({"inputs": text}, selected_model) + if output: + return output[0][0]['label'], output[0][0]['score'] + else: + st.warning("Error! 
Please try again.") + pass \ No newline at end of file diff --git a/__pycache__/HF_inference.cpython-310.pyc b/__pycache__/HF_inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dd045b2b3dd5036622c2843464da08834bf5269 Binary files /dev/null and b/__pycache__/HF_inference.cpython-310.pyc differ diff --git a/__pycache__/app5.cpython-310.pyc b/__pycache__/app5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34cb8bf1c02e830a0619fd09cbd3e03e86246e17 Binary files /dev/null and b/__pycache__/app5.cpython-310.pyc differ diff --git a/app5.cpython-310.pyc b/app5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34cb8bf1c02e830a0619fd09cbd3e03e86246e17 Binary files /dev/null and b/app5.cpython-310.pyc differ diff --git a/app5.py b/app5.py new file mode 100644 index 0000000000000000000000000000000000000000..a33acb3e12b92b973bb468eb6bc2affe71a3bbf0 --- /dev/null +++ b/app5.py @@ -0,0 +1,204 @@ +import streamlit as st +import time +from app5_selectbox import academic_list, class_tbl, instructor, program, student, subject, subj_inst, evaluation, evaluation_fac +from app5_selectbox.database_con import cursor, db_connection + + +def student_login(username, password): + cursor.execute(f"SELECT s.stud_id, s.stud_name, s.class_id, s.user_type FROM student s WHERE s.stud_username='{username}' AND s.stud_password='{password}'") + return cursor.fetchone() + +def instructor_login(username, password): + cursor.execute(f"SELECT i.inst_id, i.inst_name, i.prog_id FROM instructor i WHERE i.inst_username='{username}' AND i.inst_password='{password}'") + return cursor.fetchone() + + +def app5(): + st.title("Student-Faculty Evaluation") + + if not hasattr(st.session_state, "logged_in") or not st.session_state.logged_in: + st.subheader("User Login") + username = st.text_input("Username") + password = st.text_input("Password", type="password") + + if st.button("Login", 
type="primary"): + student_info = student_login(username, password) + + if student_info: + st.success(f"Hello, {student_info[1]}! Login Successful") + st.session_state.logged_in = True + st.session_state.student_id = student_info[0] + st.session_state.class_id = student_info[2] + st.session_state.user_type = student_info[3] + time.sleep(1) + st.experimental_rerun() + + elif not student_info: + instructor_info = instructor_login(username, password) + if instructor_info: + st.success(f"Hello, {instructor_info[1]}! Login Successful") + st.session_state.logged_in = True + st.session_state.inst_id = instructor_info[0] + st.session_state.inst_name = instructor_info[1] + st.session_state.prog_id = instructor_info[2] + st.session_state.user_type = 'faculty' + time.sleep(1) + st.experimental_rerun() + else: + st.error("Invalid Credentials") + + else: + st.error("Invalid Credentials") + else: + if st.session_state.user_type == 'student': + cursor.execute(f"SELECT s.stud_name, c.class_year, c.class_section FROM student s JOIN class c ON s.class_id = c.class_id WHERE s.stud_id='{st.session_state.student_id}'") + student_info = cursor.fetchone() + student_name, class_year, class_section = student_info + + st.subheader(f"Hello, {student_name} (Class Year: {class_year}, Section: {class_section}) - Student Evaluation") + + cursor.execute(f""" + SELECT si.subj_inst_id, si.sub_id_code, sub.sub_name, i.inst_name + FROM subj_inst si + LEFT JOIN evaluation e ON e.subj_inst_id = si.subj_inst_id AND e.stud_id = {st.session_state.student_id} + INNER JOIN subject sub ON sub.sub_id_code = si.sub_id_code + INNER JOIN instructor i ON i.inst_id = si.inst_id + WHERE e.stud_id IS NULL AND si.class_id = '{st.session_state.class_id}' + """) + + subjects = cursor.fetchall() + subject_names = [f"{subject[2]} with Instructor: {subject[3]}" for subject in subjects] + if not subjects: + st.warning("You have evaluated all available subjects. 
Thank you!") + st.balloons() + + progress_text = "logging-out . ..." + my_bar = st.progress(0, text=progress_text) + for percent_complete in range(100): + time.sleep(0.01) + my_bar.progress(percent_complete + 1, text=progress_text) + + cursor.execute(f"UPDATE student SET is_eval='TRUE' WHERE stud_id = '{st.session_state.student_id}'") + db_connection.commit() + st.session_state.pop("logged_in", None) + st.session_state.pop("student_id", None) + st.session_state.pop("class_id", None) + st.experimental_rerun() + + else: + selected_subject = st.selectbox("Select a Subject to Evaluate", subject_names) + selected_subject_id = None + + for sel_subject in subjects: + if f"{sel_subject[2]} with Instructor: {sel_subject[3]}" == selected_subject: + selected_subject_id = sel_subject[0] + + keys = {} + if selected_subject_id: + st.write(f"You are evaluating the {selected_subject}.") + criteria_list = [ + "Teaching Effectiveness", + "Course Organization", + "Accessibility and Communication", + "Assessment and Grading", + "Respect and Inclusivity", + "Engagement and Interactivity", + "Feedback and Improvement", + "Accessibility of Learning Resources", + "Passion and Enthusiasm", + "Professionalism and Ethical Conduct", + ] + criteria = {} + + for i in range(10): + criteria_key = f"criteria_{i}_{selected_subject_id}" + criteria_text = f"{criteria_list[i]} (1-5)" + criteria[i] = st.slider(criteria_text, 1.00, 5.00, 1.00, step=0.05, key=criteria_key) + keys[f"criteria_{i}"] = criteria_key + + feedback_comment_key = f"feedback_comment_{selected_subject_id}" + feedback_comment = st.text_area("Feedback/Comments", key=feedback_comment_key) + + if st.button("Submit Evaluation"): + if not feedback_comment: + st.warning("Please provide feedback comments.") + else: + cursor.execute(f"SELECT si.inst_id FROM subj_inst si WHERE si.subj_inst_id = '{selected_subject_id}'") + instructor_id = cursor.fetchone() + + if instructor_id: + instructor_id = instructor_id[0] + + cursor.execute(f"""INSERT 
INTO evaluation ( + stud_id, + subj_inst_id, + inst_id, + Teaching_Effectiveness, + Course_Organization, + Accessibility_and_Communication, + Assessment_and_Grading, + Respect_and_Inclusivity, + Engagement_and_Interactivity, + Feedback_and_Improvement, + Accessibility_of_Learning_Resources, + Passion_and_Enthusiasm, + Professionalism_and_Ethical_Conduct, + comments, + eval_timestamp) + VALUES ('{st.session_state.student_id}', '{selected_subject_id}', '{instructor_id}', '{criteria[0]}', '{criteria[1]}', '{criteria[2]}', '{criteria[3]}', '{criteria[4]}', '{criteria[5]}', '{criteria[6]}', '{criteria[7]}', '{criteria[8]}', '{criteria[9]}','{feedback_comment}', strftime('%Y-%m-%d %H:%M:%S','now'))""") + db_connection.commit() + + with st.empty(): + st.write("Submitting evaluation...") + time.sleep(0.3) + st.success("Evaluation submitted successfully") + time.sleep(0.4) + + feedback_comment = "" + + st.experimental_rerun() + else: + for i in keys.keys(): + keys[i] = None + feedback_comment = None + + if st.button("Log Out"): + st.session_state.pop("logged_in", None) + st.session_state.pop("student_id", None) + st.session_state.pop("class_id", None) + st.experimental_rerun() + elif st.session_state.user_type == 'faculty': + evaluation_fac.evaluation() + + elif st.session_state.user_type == 'admin': + table_name = st.sidebar.selectbox("Select Table", ("academic_list", "class", "instructor", "program", "student", "subject", "subj_inst", "evaluation")) + + if table_name == "academic_list": + academic_list.academic_list(table_name) + elif table_name == "class": + class_tbl.class_tbl(table_name) + elif table_name == "instructor": + instructor.instructor(table_name) + elif table_name == "program": + program.program(table_name) + elif table_name == "student": + student.student(table_name) + elif table_name == "subject": + subject.subject(table_name) + elif table_name == "subj_inst": + subj_inst.subj_inst(table_name) + elif table_name == "evaluation": + evaluation.evaluation() + 
else: + st.error("Select a valid table from the sidebar.") + + if st.button("Log Out"): + st.session_state.pop("logged_in", None) + st.session_state.pop("student_id", None) + st.session_state.pop("class_id", None) + st.experimental_rerun() + + +# Call the main function +if __name__ == "__main__": + app5() diff --git a/app5_selectbox/QuartzoBold-W9lv.ttf b/app5_selectbox/QuartzoBold-W9lv.ttf new file mode 100644 index 0000000000000000000000000000000000000000..2fafbad1b1b9ad077a935d56de23adf0413a1410 Binary files /dev/null and b/app5_selectbox/QuartzoBold-W9lv.ttf differ diff --git a/app5_selectbox/__pycache__/academic_list.cpython-310.pyc b/app5_selectbox/__pycache__/academic_list.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ab59f1d2b6d0b6d6e14f6f0ffb59d9ba1ad4c8e Binary files /dev/null and b/app5_selectbox/__pycache__/academic_list.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/academic_list.cpython-39.pyc b/app5_selectbox/__pycache__/academic_list.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fa37b5546d32bf3178351b68aeed40f28eeffd8 Binary files /dev/null and b/app5_selectbox/__pycache__/academic_list.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/app5_selectbox_func.cpython-310.pyc b/app5_selectbox/__pycache__/app5_selectbox_func.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fae158e526c3b11dcadf01252df183b9ec8d5dd Binary files /dev/null and b/app5_selectbox/__pycache__/app5_selectbox_func.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/app5_selectbox_func.cpython-39.pyc b/app5_selectbox/__pycache__/app5_selectbox_func.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26e7a7675d3b085a4ed1567fe19dbc827f891d60 Binary files /dev/null and b/app5_selectbox/__pycache__/app5_selectbox_func.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/class_tbl.cpython-310.pyc 
b/app5_selectbox/__pycache__/class_tbl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebeb39882e02df8becf336924a0a50e11632a063 Binary files /dev/null and b/app5_selectbox/__pycache__/class_tbl.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/class_tbl.cpython-39.pyc b/app5_selectbox/__pycache__/class_tbl.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6ba9d4a84fafe25e25f805420b4de0a6c35160b Binary files /dev/null and b/app5_selectbox/__pycache__/class_tbl.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/database.cpython-39.pyc b/app5_selectbox/__pycache__/database.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80cf69267c36fa2ddafe67f0eeb8fdba8f82107b Binary files /dev/null and b/app5_selectbox/__pycache__/database.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/database_con.cpython-310.pyc b/app5_selectbox/__pycache__/database_con.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4d6a6653dbd70299616d10f39355c8f17a7dc6d Binary files /dev/null and b/app5_selectbox/__pycache__/database_con.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/database_con.cpython-39.pyc b/app5_selectbox/__pycache__/database_con.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e3393879d514468862ffc927a2e008eaa4bd68e Binary files /dev/null and b/app5_selectbox/__pycache__/database_con.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/db_con.cpython-39.pyc b/app5_selectbox/__pycache__/db_con.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..104095742d1f8f26b3597816fdba590d3c76fed3 Binary files /dev/null and b/app5_selectbox/__pycache__/db_con.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/df4_sentiment_analysis.cpython-39.pyc b/app5_selectbox/__pycache__/df4_sentiment_analysis.cpython-39.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..939872a1ea0f95f6ef70c0a0e649aefe681fa1d4 Binary files /dev/null and b/app5_selectbox/__pycache__/df4_sentiment_analysis.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/evaluation.cpython-310.pyc b/app5_selectbox/__pycache__/evaluation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91eeeff86fc632c80c49329ba0866f0e6fb2c70b Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/evaluation.cpython-39.pyc b/app5_selectbox/__pycache__/evaluation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3fadbfb5bb456f4ec8aae946cacc343572fe17a Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/evaluation_analysis.cpython-310.pyc b/app5_selectbox/__pycache__/evaluation_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08d3772fcf03b66842532d42f631cd6ad3b4df8c Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation_analysis.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/evaluation_analysis.cpython-39.pyc b/app5_selectbox/__pycache__/evaluation_analysis.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8c1a2bbad9847abe2eae1db7a2aa52401c64354 Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation_analysis.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/evaluation_analysis_g4f.cpython-39.pyc b/app5_selectbox/__pycache__/evaluation_analysis_g4f.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edadd89598b192129a7a4249f810a512f855e98d Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation_analysis_g4f.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/evaluation_fac.cpython-310.pyc 
b/app5_selectbox/__pycache__/evaluation_fac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3f3fd84e888f2b462ffecdf26d02b9d8e376303 Binary files /dev/null and b/app5_selectbox/__pycache__/evaluation_fac.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/g4f_prompt.cpython-310.pyc b/app5_selectbox/__pycache__/g4f_prompt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..984e427a66751f41f313612c6f70932c446fca6e Binary files /dev/null and b/app5_selectbox/__pycache__/g4f_prompt.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/g4f_prompt.cpython-39.pyc b/app5_selectbox/__pycache__/g4f_prompt.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64b47a59f874743afe0a8e345b8217ce4f4a69e8 Binary files /dev/null and b/app5_selectbox/__pycache__/g4f_prompt.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/instructor.cpython-310.pyc b/app5_selectbox/__pycache__/instructor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e071509cfc4977cc96e836ee1f75446615fd1f1e Binary files /dev/null and b/app5_selectbox/__pycache__/instructor.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/instructor.cpython-39.pyc b/app5_selectbox/__pycache__/instructor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d1eb7e16a326294e729e5087f4906e9590bd172 Binary files /dev/null and b/app5_selectbox/__pycache__/instructor.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/langchain_llama_gpu.cpython-39.pyc b/app5_selectbox/__pycache__/langchain_llama_gpu.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed7414086a3f01ebf785858198addd74ba922858 Binary files /dev/null and b/app5_selectbox/__pycache__/langchain_llama_gpu.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/llama2_prompt.cpython-310.pyc 
b/app5_selectbox/__pycache__/llama2_prompt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1a4c028ffb198c335fc89a7d1c50e0e69e38a8c Binary files /dev/null and b/app5_selectbox/__pycache__/llama2_prompt.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/load_llama2.cpython-310.pyc b/app5_selectbox/__pycache__/load_llama2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e30cc23706910219b9df0d9de41c5ac6578e6a1b Binary files /dev/null and b/app5_selectbox/__pycache__/load_llama2.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/naive_bayes_cl.cpython-310.pyc b/app5_selectbox/__pycache__/naive_bayes_cl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70d3c24d597ba706b8092c49c291ad0e47dfe38d Binary files /dev/null and b/app5_selectbox/__pycache__/naive_bayes_cl.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/program.cpython-310.pyc b/app5_selectbox/__pycache__/program.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..176e4a3d69c8874a5dda3a32264e85a402ae2244 Binary files /dev/null and b/app5_selectbox/__pycache__/program.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/program.cpython-39.pyc b/app5_selectbox/__pycache__/program.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9369c6cf5603463793b16527f8c0dd941beae34 Binary files /dev/null and b/app5_selectbox/__pycache__/program.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/student.cpython-310.pyc b/app5_selectbox/__pycache__/student.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89057e2a0d86fea11df4eb146c13fc8162f2f176 Binary files /dev/null and b/app5_selectbox/__pycache__/student.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/student.cpython-39.pyc b/app5_selectbox/__pycache__/student.cpython-39.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..52e8c683edeb58ea01c7e236c5d4c96b7283dd2a Binary files /dev/null and b/app5_selectbox/__pycache__/student.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/subj_inst.cpython-310.pyc b/app5_selectbox/__pycache__/subj_inst.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd0fc7b0b7d1f698e0919b52f38505ee8842a7f5 Binary files /dev/null and b/app5_selectbox/__pycache__/subj_inst.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/subj_inst.cpython-39.pyc b/app5_selectbox/__pycache__/subj_inst.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00a76e49c867a7ba6eda53f24ded4a5cdb3bf9c7 Binary files /dev/null and b/app5_selectbox/__pycache__/subj_inst.cpython-39.pyc differ diff --git a/app5_selectbox/__pycache__/subject.cpython-310.pyc b/app5_selectbox/__pycache__/subject.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e9bdd42df15c7c220c60483aa033d0aa9f97c04 Binary files /dev/null and b/app5_selectbox/__pycache__/subject.cpython-310.pyc differ diff --git a/app5_selectbox/__pycache__/subject.cpython-39.pyc b/app5_selectbox/__pycache__/subject.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d0745f01864f89be62b8ba2bd29943aaf7041a0 Binary files /dev/null and b/app5_selectbox/__pycache__/subject.cpython-39.pyc differ diff --git a/app5_selectbox/academic_list.py b/app5_selectbox/academic_list.py new file mode 100644 index 0000000000000000000000000000000000000000..88d35afa04c8fcf0927223e73b8a66efe2fd15cf --- /dev/null +++ b/app5_selectbox/academic_list.py @@ -0,0 +1,32 @@ +# academic_list.py +import streamlit as st +from app5_selectbox.database_con import cursor, db_connection #connect_to_database, execute_query +from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4 + + +def academic_list(table_name): + # Include the academic_list-specific code 
here + acad_id = generate_unique_4(cursor, "acad_id", table_name) + acad_year = st.text_input("Academic Year", key="acad_year") + sem_num = st.selectbox("Semester Number", ("1", "2"), key="sem_num") + + if st.button("Insert Academic List Record"): + # Check if the acad_year and sem_num are provided + if not acad_year or not sem_num: + st.error("Academic Year and Semester Number are required. Please provide values for both fields.") + else: + try: + # Check for duplicates in acad_year and sem_num + cursor.execute("SELECT acad_id FROM academic_list WHERE acad_year = %s AND sem_num = %s", (acad_year, sem_num)) + duplicate = cursor.fetchone() + if duplicate is not None: + st.error("Duplicate entry found. Please provide unique Academic Year and Semester Number.") + else: + # Insert a record into the academic_list table + cursor.execute("INSERT INTO academic_list (acad_id, acad_year, sem_num) VALUES (%s, %s, %s)", + (acad_id, acad_year, sem_num)) + db_connection.commit() + st.success("Record inserted successfully.") + except Exception as e: + st.error(f"An error occurred: {str(e)}") + display_table(cursor, table_name) diff --git a/app5_selectbox/app5_selectbox_func.py b/app5_selectbox/app5_selectbox_func.py new file mode 100644 index 0000000000000000000000000000000000000000..0c77b41e23303c2fdba934ef3c6bfacce3dcc3c7 --- /dev/null +++ b/app5_selectbox/app5_selectbox_func.py @@ -0,0 +1,31 @@ +import random +import streamlit as st +import time +import pandas as pd + +def generate_unique_4(cursor, col_id, tblname): + while True: + unique_id = random.randint(1000, 9999) + cursor.execute(f"SELECT {col_id} FROM {tblname} WHERE {col_id} = {unique_id}") + result = cursor.fetchone() + if result is None: + return unique_id + +def display_table(cursor, table_name): + try: + cursor.execute(f"pragma table_info('{table_name}')") + column_data = cursor.fetchall() + column_names = [column[1] for column in column_data] + + cursor.execute(f"SELECT * FROM {table_name}") + data = 
cursor.fetchall() + + if not data: + st.warning(f"No data found in the {table_name} table.") + else: + df = pd.DataFrame(data, columns=column_names) + st.header(f"{table_name} Table") + st.dataframe(df.style.set_properties(**{'text-align': 'center'})) + + except Exception as e: + st.error(f"An error occurred while fetching data from {table_name}: {str(e)}") diff --git a/app5_selectbox/class_tbl.py b/app5_selectbox/class_tbl.py new file mode 100644 index 0000000000000000000000000000000000000000..f1f0a218667591a0cbfe8b6b17048639a5cb2138 --- /dev/null +++ b/app5_selectbox/class_tbl.py @@ -0,0 +1,47 @@ +# class.py +import streamlit as st +import pandas as pd +from app5_selectbox.database_con import cursor, db_connection +from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4 + + +# In the display_table function, fetch and display prog_code +def display_class_table(cursor, table_name): + if table_name == "class": + cursor.execute("SELECT class.class_id, class.prog_id, program.prog_code, class.class_year, class.class_section FROM class INNER JOIN program ON class.prog_id = program.prog_id") + data = cursor.fetchall() + column_names = [i[0] for i in cursor.description] + df = pd.DataFrame(data, columns=column_names) + st.dataframe(df.style.set_properties(**{'text-align': 'center'})) + +def class_tbl(table_name): + class_id = generate_unique_4(cursor, "class_id", table_name) + + # Fetch available programs from the 'program' table + cursor.execute("SELECT prog_id, prog_name, prog_code FROM program") + available_programs = cursor.fetchall() + prog_id = st.selectbox("Program ID", available_programs, format_func=lambda row: f"{row[1]} ({row[2]})", key="prog_id")[0] + class_year = st.selectbox("Class Year", ("1", "2", "3", "4"), key="class_year") + class_section = st.text_input("Class Section", key="class_section", max_chars=1).upper() + + if st.button("Insert Class Record"): + # Check if the class_year and class_section are provided + if not class_year 
or not class_section: + st.error("Class Year and Class Section are required. Please provide values for both fields.") + else: + try: + # Check for duplicates + cursor.execute("SELECT class_id FROM class WHERE prog_id = %s AND class_year = %s AND class_section = %s", + (prog_id, class_year, class_section)) + result = cursor.fetchone() + if result is not None: + st.error("A record with the same Program ID, Class Year, and Class Section already exists.") + else: + # Insert a record into the class table + cursor.execute("INSERT INTO class (class_id, prog_id, class_year, class_section) VALUES (%s, %s, %s, %s)", + (class_id, prog_id, class_year, class_section)) + db_connection.commit() + st.success("Record inserted successfully.") + except Exception as e: + st.error(f"An error occurred: {str(e)}") + display_class_table(cursor, table_name) \ No newline at end of file diff --git a/app5_selectbox/database_con.py b/app5_selectbox/database_con.py new file mode 100644 index 0000000000000000000000000000000000000000..37ad00856279c07e9611b292915be53c37307738 --- /dev/null +++ b/app5_selectbox/database_con.py @@ -0,0 +1,18 @@ +# # database.py +# import mysql.connector + +# # Connect to your MySQL database +# db_connection = mysql.connector.connect( +# host="localhost", +# user="root", +# password="", +# database="university_evaluation_5" +# ) + +# cursor = db_connection.cursor() + +#### for sqlite connection #### + +import sqlite3 +db_connection = sqlite3.connect('/home/aibo/prototype_v1/prototype/database/data.sqlite', check_same_thread=False) +cursor = db_connection.cursor() diff --git a/app5_selectbox/df4_sentiment_analysis.py b/app5_selectbox/df4_sentiment_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..5b54fdf6a2c30d8ff5065e7edbb5e035fce129a6 --- /dev/null +++ b/app5_selectbox/df4_sentiment_analysis.py @@ -0,0 +1,60 @@ +import g4f +import time + + +def sentiment_func(message_list): + message_list=[ + "Your lectures were so dull and 
uninspiring, I couldn't help but zone out", + "you have an extraordinary talent for leadership.", + "The instructor's indifference made it difficult to remain engaged or motivated", + "The lack of enthusiasm from the instructor made the class feel like a chore", + "Salamat sa iyong inspirasyon at dedikasyon sa aming edukasyon.", + "Sa bawat pagkakataon, lumalalim ang aming pag-unawa sa mga aralin.", + "Thanks for being dedicated to our education.", + "You show the societal importance of education.", + "The instructor's disinterested demeanor was reflected in the overall class atmosphere" + ] + + message_list = '[label]\n'.join(message_list) + # print(message_list) + prompt = f""" + Please provide a single-word response per sentence. + label the following sentences if it is positive,negative + sentence list = {message_list} + your output is should in comma separated + example output : positive,negative,negative,positive + """ + # Please provide a single-word response. + + print(prompt) + while True: + try: + # streamed completion + response = g4f.ChatCompletion.create( + model="gpt-3.5-turbo", + # provider=g4f.Provider.GeekGpt, + provider=g4f.Provider.You, + + # model="gpt-4", + # provider=g4f.Provider.Bing, + + messages=[{"role": "user", "content": prompt}], + stream=True, + ) + returned_output = "" + for message in response: + # print(message, flush=True, end='') + returned_output += message + # print(message) + returned_output = returned_output.split(',') + # Trim extra white spaces and convert to lowercase + returned_output = [item.strip().lower() for item in returned_output] + return returned_output + # print(returned_output) + break # Exit the loop if the chat completes successfully + + except Exception as e: + # Handle the error (e.g., log it or take appropriate action) + # Sleep for a moment before retrying + print("error....",e) + time.sleep(0.4) diff --git a/app5_selectbox/evaluation copy 2.py b/app5_selectbox/evaluation copy 2.py new file mode 100644 
# --- app5_selectbox/evaluation copy 2.py -------------------------------------
# Instructor analytics over the `evaluation` table (Streamlit page).
import streamlit as st
import pandas as pd
from app5_selectbox.database_con import cursor, db_connection
from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4
from app5_selectbox.evaluation_analysis import eval_analysis

import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go


def analyze_instructors(cursor):
    """Render evaluation analytics for an instructor chosen in a selectbox.

    Reads the `evaluation`, `instructor`, `subj_inst` and `subject` tables,
    draws per-subject / per-criteria bar charts, and returns
    ``(selected_instructor, comments, results_to_prompt)`` for the downstream
    sentiment analysis.  Returns ``None`` when there is no data or on error.
    """
    try:
        cursor.execute("SELECT * FROM evaluation")
        evaluation_data = cursor.fetchall()

        if not evaluation_data:
            st.warning("No evaluation data found.")
            return None

        # Display labels use spaces instead of underscores.
        column_names = [desc[0].replace("_", " ") for desc in cursor.description]
        df = pd.DataFrame(evaluation_data, columns=column_names)

        # Columns 4..13 hold the ten score criteria — assumed schema order;
        # TODO(review): confirm against the `evaluation` table definition.
        criteria_names = [desc[0].replace("_", " ") for desc in cursor.description][4:14]

        instructor_avg_scores = df.groupby("inst id")[criteria_names].mean().reset_index()

        cursor.execute("SELECT inst_id, inst_name FROM instructor")
        instructor_df = pd.DataFrame(cursor.fetchall(), columns=["inst id", "instructor name"])
        instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst id", how="left")

        selected_instructor = st.selectbox(
            "Select Instructor", instructor_avg_scores["instructor name"].unique()
        )
        selected_inst_id = instructor_avg_scores.loc[
            instructor_avg_scores["instructor name"] == selected_instructor, "inst id"
        ].values[0]
        filtered_data = df[df["inst id"] == selected_inst_id]

        selected_instructor_comments = list(filtered_data["comments"])
        st.subheader(f"Evaluated by: {len(selected_instructor_comments)} students")

        # Map each subject-instance id to its subject name.
        cursor.execute("""
            SELECT subj_inst.subj_inst_id, subject.sub_name
            FROM subj_inst
            INNER JOIN subject
            ON subj_inst.sub_id_code = subject.sub_id_code
        """)
        subject_df = pd.DataFrame(cursor.fetchall(), columns=["subj inst id", "sub name"])
        filtered_data = filtered_data.merge(subject_df, on="subj inst id", how="left")

        subject_avg_scores = filtered_data.groupby("sub name")[criteria_names].mean().reset_index()
        subject_avg_scores["total average"] = subject_avg_scores[criteria_names].mean(axis=1)

        # Grouped bars per criterion, plus a marker showing each subject's total.
        fig = go.Figure()
        for criterion in criteria_names:
            fig.add_trace(go.Bar(
                x=subject_avg_scores["sub name"],
                y=subject_avg_scores[criterion],
                name=criterion,
            ))
        fig.add_trace(go.Scatter(
            x=subject_avg_scores["sub name"],
            y=subject_avg_scores["total average"],
            mode="markers+text",
            text=round(subject_avg_scores["total average"], 2),
            textposition="top center",
            textfont=dict(size=14),
            marker=dict(size=10, color="black"),
            name="Total Average",
        ))
        overall_average = subject_avg_scores["total average"].mean()
        fig.update_layout(
            barmode="group",
            title=f"Average Scores per Criteria by Subject for Instructor: {selected_instructor}",
            xaxis_title=f"Overall Average Score (All Subjects): {overall_average:.2f}",
            yaxis_title="Average Score",
        )
        st.plotly_chart(fig)

        # Plain-text summary later embedded into the LLM prompt.
        results_to_prompt = "Average score per Criteria\n"
        criteria_averages = []
        for criteria in filtered_data.columns[4:14]:
            average_score = round(filtered_data[criteria].mean(), 2)
            criteria_averages.append((criteria, average_score))
            results_to_prompt += f"{criteria}: {average_score}/5, \n"

        fig = go.Figure()
        fig.add_trace(go.Bar(
            x=[criteria for criteria, _ in criteria_averages],
            y=[score for _, score in criteria_averages],
            text=[f"{score}/5" for _, score in criteria_averages],
        ))
        fig.update_layout(
            title="Average Score per Criteria",
            xaxis_title="Criteria",
            yaxis_title="Average Score",
        )
        st.plotly_chart(fig)

        # One chart per subject taught by the selected instructor.
        for subject in subject_avg_scores["sub name"]:
            subject_filtered_data = filtered_data[filtered_data["sub name"] == subject]
            fig = go.Figure()
            st.write(subject_filtered_data)
            for criterion in criteria_names:
                fig.add_trace(go.Bar(
                    x=[criterion],
                    y=[subject_filtered_data[criterion].mean()],
                    text=[subject_filtered_data[criterion].mean()],
                    name=criterion,
                ))
            total_average = subject_filtered_data[criteria_names].mean(axis=1).mean()
            fig.update_layout(
                barmode="group",
                title=f"{subject} Average Score: {total_average:.2f}",
                yaxis_title="Average Score",
            )
            st.plotly_chart(fig)

        return selected_instructor, selected_instructor_comments, results_to_prompt

    except Exception as e:
        # NOTE(review): broad catch keeps the page alive but hides coding bugs.
        st.error(f"An error occurred during data analytics: {str(e)}")
def evaluation(cursor, table_name):
    """Page entry point: fetch evaluation rows, run analytics, and (on button
    press) run sentiment analysis over the selected instructor's comments.

    `table_name` is only used for display; the query is always on `evaluation`.
    """
    try:
        cursor.execute("SELECT * FROM evaluation")
        evaluation_data = cursor.fetchall()

        if not evaluation_data:
            st.warning("No evaluation data found.")
        else:
            column_names = [desc[0] for desc in cursor.description]
            df = pd.DataFrame(evaluation_data, columns=column_names)

            analyze_instructors_results = analyze_instructors(cursor)

            if st.button("Analyze comments"):
                eval_analysis(
                    analyze_instructors_results[0],
                    analyze_instructors_results[1],
                    analyze_instructors_results[2],
                )
    except Exception as e:
        st.error(f"An error occurred while fetching evaluation data: {str(e)}")


# --- app5_selectbox/evaluation copy.py ---------------------------------------
# Earlier revision of the analytics page: same flow, fewer charts, and
# analyze_instructors returns a 2-tuple (no prompt text).
import streamlit as st
import pandas as pd
from app5_selectbox.database_con import cursor, db_connection
from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4
from app5_selectbox.evaluation_analysis import eval_analysis

import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go


def analyze_instructors(cursor):
    """Chart per-criteria averages by subject for a selectbox-chosen instructor.

    Returns ``(selected_instructor, comments)``, or ``None`` on no data / error.
    """
    try:
        cursor.execute("SELECT * FROM evaluation")
        evaluation_data = cursor.fetchall()

        if not evaluation_data:
            st.warning("No evaluation data found.")
            return None

        # Display labels use spaces instead of underscores.
        column_names = [desc[0].replace("_", " ") for desc in cursor.description]
        df = pd.DataFrame(evaluation_data, columns=column_names)

        # Columns 4..13 hold the ten score criteria.
        criteria_names = [desc[0].replace("_", " ") for desc in cursor.description][4:14]
        instructor_avg_scores = df.groupby("inst id")[criteria_names].mean().reset_index()

        cursor.execute("SELECT inst_id, inst_name FROM instructor")
        instructor_df = pd.DataFrame(cursor.fetchall(), columns=["inst id", "instructor name"])
        instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst id", how="left")

        selected_instructor = st.selectbox(
            "Select Instructor", instructor_avg_scores["instructor name"].unique()
        )
        selected_inst_id = instructor_avg_scores.loc[
            instructor_avg_scores["instructor name"] == selected_instructor, "inst id"
        ].values[0]
        filtered_data = df[df["inst id"] == selected_inst_id]

        selected_instructor_comments = list(filtered_data["comments"])
        st.subheader(f"Evaluated by: {len(selected_instructor_comments)} students")

        cursor.execute("""
            SELECT subj_inst.subj_inst_id, subject.sub_name
            FROM subj_inst
            INNER JOIN subject
            ON subj_inst.sub_id_code = subject.sub_id_code
        """)
        subject_df = pd.DataFrame(cursor.fetchall(), columns=["subj inst id", "sub name"])
        filtered_data = filtered_data.merge(subject_df, on="subj inst id", how="left")

        subject_avg_scores = filtered_data.groupby("sub name")[criteria_names].mean().reset_index()
        subject_avg_scores["total average"] = subject_avg_scores[criteria_names].mean(axis=1)

        # Grouped bars per criterion, plus a marker for each subject's total.
        fig = go.Figure()
        for criterion in criteria_names:
            fig.add_trace(go.Bar(
                x=subject_avg_scores["sub name"],
                y=subject_avg_scores[criterion],
                name=criterion,
            ))
        fig.add_trace(go.Scatter(
            x=subject_avg_scores["sub name"],
            y=subject_avg_scores["total average"],
            mode="markers+text",
            text=round(subject_avg_scores["total average"], 2),
            textposition="top center",
            textfont=dict(size=14),
            marker=dict(size=10, color="black"),
            name="Total Average",
        ))
        overall_average = subject_avg_scores["total average"].mean()
        fig.update_layout(
            barmode="group",
            title=f"Average Scores per Criteria by Subject for Instructor: {selected_instructor}",
            xaxis_title=f"Overall Average Score (All Subjects): {overall_average:.2f}",
            yaxis_title="Average Score",
        )
        st.plotly_chart(fig)

        return selected_instructor, selected_instructor_comments

    except Exception as e:
        # NOTE(review): broad catch keeps the page alive but hides coding bugs.
        st.error(f"An error occurred during data analytics: {str(e)}")
def evaluation(cursor, table_name):
    """Earlier-revision page entry point (two-argument ``eval_analysis`` call)."""
    try:
        cursor.execute("SELECT * FROM evaluation")
        evaluation_data = cursor.fetchall()

        if not evaluation_data:
            st.warning("No evaluation data found.")
        else:
            column_names = [desc[0] for desc in cursor.description]
            df = pd.DataFrame(evaluation_data, columns=column_names)

            analyze_instructors_results = analyze_instructors(cursor)

            if st.button("Analyze comments"):
                eval_analysis(analyze_instructors_results[0], analyze_instructors_results[1])
    except Exception as e:
        st.error(f"An error occurred while fetching evaluation data: {str(e)}")


# --- app5_selectbox/evaluation.py --------------------------------------------
# Current analytics page: program filter, weighted averages, LLM insights.
import streamlit as st
import pandas as pd
import plotly.graph_objs as go
import time
import plotly.express as px
import ast
import numpy as np

from app5_selectbox.database_con import cursor, db_connection
from app5_selectbox.app5_selectbox_func import generate_unique_4
from app5_selectbox.evaluation_analysis import eval_analysis
from app5_selectbox.g4f_prompt import g4f_prompt

# Cap for LLM retry loops; the original `while True ... except: pass` spun
# forever when g4f_prompt failed persistently.
_MAX_LLM_RETRIES = 10


def fetch_evaluation_data():
    """Return the whole `evaluation` table as a DataFrame, or None if empty."""
    cursor.execute("SELECT * FROM evaluation")
    evaluation_data = cursor.fetchall()
    if not evaluation_data:
        st.warning("No evaluation data found.")
        return None
    column_names = [desc[0] for desc in cursor.description]
    return pd.DataFrame(evaluation_data, columns=column_names)


def _llm_insight(prompt, spinner_text):
    """Render an LLM-generated insight, retrying a bounded number of times."""
    for _ in range(_MAX_LLM_RETRIES):
        try:
            with st.spinner(spinner_text):
                st.write(g4f_prompt(prompt))
            return True
        except Exception:
            time.sleep(0.4)  # brief back-off before retrying
    st.warning("Could not generate insights right now.")
    return False


def analyze_instructors(evaluation_df):
    """Render the full analytics dashboard for one instructor.

    Filters by program, charts per-criteria / per-subject averages, shows the
    respondents' section distribution and weighted averages, and asks the LLM
    for short insights.  Returns
    ``(selected_instructor, comments, results_to_prompt)``.
    """
    if evaluation_df is None:
        return

    # Columns 4..13 hold the ten score criteria; labels swap "_" for " ".
    column_names = evaluation_df.columns[4:14]
    criteria_labels = [column.replace("_", " ") for column in column_names]

    cursor.execute("SELECT * FROM instructor")
    instructor_df = pd.DataFrame(
        cursor.fetchall(),
        columns=["inst_id", "instructor name", "program code", "user name", "password"],
    )
    instructor_avg_scores = evaluation_df.groupby("inst_id")[column_names].mean().reset_index()
    instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst_id", how="left")

    # Program filter: pick a program code, resolve it to its prog_id, and keep
    # only instructors whose "program code" column matches that id.
    cursor.execute("SELECT prog_id, prog_code, prog_name FROM program")
    selected_program = pd.DataFrame(cursor.fetchall(), columns=["prog_id", "prog_code", "prog_name"])
    selected_program_select = st.selectbox("Select Program", selected_program["prog_code"])

    filtered_instructor_list = pd.DataFrame(instructor_avg_scores)
    mask = filtered_instructor_list["program code"] == selected_program.loc[
        selected_program["prog_code"] == selected_program_select, "prog_id"
    ].values[0]
    filtered_instructor_list = filtered_instructor_list.loc[mask]

    instructors_list = sorted(filtered_instructor_list["instructor name"].unique())
    selected_instructor = st.selectbox("Select Instructor", instructors_list)

    selected_inst_id = instructor_avg_scores.loc[
        instructor_avg_scores["instructor name"] == selected_instructor, "inst_id"
    ].values[0]
    filtered_data = evaluation_df[evaluation_df["inst_id"] == selected_inst_id]

    selected_instructor_comments = list(filtered_data["comments"])
    st.write(filtered_data)
    st.subheader(f"Evaluated by: {len(selected_instructor_comments)} students")

    cursor.execute("""
        SELECT subj_inst.subj_inst_id, subject.sub_name
        FROM subj_inst
        INNER JOIN subject
        ON subj_inst.sub_id_code = subject.sub_id_code
    """)

    # Scores may arrive as strings from the driver; coerce to numbers.
    filtered_data[column_names] = filtered_data[column_names].apply(pd.to_numeric, errors="coerce")

    subject_df = pd.DataFrame(cursor.fetchall(), columns=["subj_inst_id", "sub name"])
    filtered_data = filtered_data.merge(subject_df, on="subj_inst_id", how="left")

    subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index()
    subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1)

    # Grouped bars per criterion plus a per-subject total marker.
    fig = go.Figure()
    for criterion, label in zip(column_names, criteria_labels):
        fig.add_trace(go.Bar(
            x=subject_avg_scores["sub name"],
            y=subject_avg_scores[criterion],
            name=label,
        ))
    total_average = subject_avg_scores["total average"].mean()
    fig.add_trace(go.Scatter(
        x=subject_avg_scores["sub name"],
        y=subject_avg_scores["total average"],
        mode="markers+text",
        text=round(subject_avg_scores["total average"], 2),
        textposition="top center",
        textfont=dict(size=14),
        marker=dict(size=10, color="black"),
        name="Total Average",
    ))
    fig.update_layout(
        width=1000, height=600,
        barmode="group",
        title=f"Average Scores per Criteria by Subject for Instructor: {selected_instructor}",
        xaxis_title=f"Overall Average Score (All Subjects): {total_average:.2f}",
        yaxis_title="Average Score",
    )
    st.plotly_chart(fig)

    # Per-criteria averages, both charted and summarised for the LLM prompt.
    results_to_prompt = "Average score per Criteria\n"
    criteria_averages = [
        (criteria.replace("_", " "), round(filtered_data[criteria].mean(), 2))
        for criteria in column_names
    ]
    for criteria, average_score in criteria_averages:
        results_to_prompt += f"{criteria}: {average_score}/5, \n"

    fig = go.Figure()
    fig.add_trace(go.Bar(
        x=[criteria for criteria, _ in criteria_averages],
        y=[average_score for _, average_score in criteria_averages],
        text=[f"{average_score}/5" for _, average_score in criteria_averages],
    ))
    fig.update_layout(
        width=1000,
        title="Average Score per Criteria",
        xaxis_title="Criteria",
        yaxis_title="Average Score",
    )
    st.plotly_chart(fig)

    results_to_prompt = f"""
    Based from these over-all average score please Analyze it and provide short insights: {str(results_to_prompt)}.
    Make it in sentence type and in English language only.

    """
    _llm_insight(results_to_prompt, "Analyzing... ")

    # Pie chart: evaluation distribution per student's section.
    # NOTE(review): interpolating the id tuple into SQL is injection-prone and
    # produces invalid SQL for a single id ("(5,)"); prefer parameterized IN.
    cursor.execute(f"""
        SELECT
            pr.prog_code || '-' || c.class_year || '-' || c.class_section AS merged_result,
            COUNT(*) AS occurrence_count
        FROM
            student s
        JOIN
            class c ON s.class_id = c.class_id
        JOIN
            program pr ON c.prog_id = pr.prog_id
        WHERE
            s.stud_id IN {tuple(list(filtered_data["stud_id"]))}
        GROUP BY
            s.class_id, pr.prog_code, c.class_year, c.class_section;

    """)
    merged_result = pd.DataFrame(cursor.fetchall(), columns=["merged_result", "occurrence_count"])
    st.write(filtered_data)
    st.write(merged_result)

    fig = px.pie(
        merged_result,
        values="occurrence_count",
        names="merged_result",
        title="Evaluation Distribution per Student's Section",
    )
    fig.update_traces(
        hovertemplate="%{label}: %{percent}\nOccurrence Count: %{value}",
        textinfo="percent+value",
    )
    fig.update_layout(width=600, height=600, font=dict(size=20))
    st.plotly_chart(fig)

    # Per-class averages and weighted averages over the ten criteria.
    cursor.execute(f"""
        SELECT
            s.class_id,
            pr.prog_code || '-' || c.class_year || '-' || c.class_section AS class_info,
            COUNT(DISTINCT s.stud_id) AS num_respondents,
            ROUND((AVG(Teaching_Effectiveness) + AVG(Course_Organization) + AVG(Accessibility_and_Communication) +
                   AVG(Assessment_and_Grading) + AVG(Respect_and_Inclusivity) + AVG(Engagement_and_Interactivity) +
                   AVG(Feedback_and_Improvement) + AVG(Accessibility_of_Learning_Resources) +
                   AVG(Passion_and_Enthusiasm) + AVG(Professionalism_and_Ethical_Conduct)) / 10, 2) AS avg_overall,
            ROUND((COUNT(DISTINCT s.stud_id) * (AVG(Teaching_Effectiveness) + AVG(Course_Organization) + AVG(Accessibility_and_Communication) +
                   AVG(Assessment_and_Grading) + AVG(Respect_and_Inclusivity) + AVG(Engagement_and_Interactivity) +
                   AVG(Feedback_and_Improvement) + AVG(Accessibility_of_Learning_Resources) +
                   AVG(Passion_and_Enthusiasm) + AVG(Professionalism_and_Ethical_Conduct)) / 10), 2) AS weighted_avg_overall
        FROM
            evaluation e
        JOIN
            student s ON e.stud_id = s.stud_id
        JOIN
            class c ON s.class_id = c.class_id
        JOIN
            program pr ON c.prog_id = pr.prog_id
        WHERE
            s.stud_id IN {tuple(list(filtered_data["stud_id"]))}
        GROUP BY
            s.class_id, pr.prog_code, c.class_year, c.class_section, class_info;
    """)
    avg_scores_per_class = pd.DataFrame(cursor.fetchall(), columns=[
        "class_id", "class_info", "num_respondents", "avg_overall", "weighted_avg_overall",
    ])

    # Normalise the last row's weighted average back to a per-respondent value.
    last_row_index = avg_scores_per_class["weighted_avg_overall"].last_valid_index()
    if last_row_index is not None:
        avg_scores_per_class.at[last_row_index, "weighted_avg_overall"] /= \
            avg_scores_per_class.at[last_row_index, "num_respondents"]

    # Recompute the weighted column client-side (respondents * average).
    avg_scores_per_class["weighted_avg_overall"] = (
        avg_scores_per_class["num_respondents"] * avg_scores_per_class["avg_overall"]
    )
    avg_scores_per_class = avg_scores_per_class.dropna()

    num_respondents = round(avg_scores_per_class["num_respondents"].sum(), 2)
    overall_avg_overall = round(avg_scores_per_class["avg_overall"].mean(), 2)
    overall_weighted_avg_overall = round(avg_scores_per_class["weighted_avg_overall"].sum(), 2)
    weighted_avg_overall = round(overall_weighted_avg_overall / num_respondents, 2)

    def calculate_satisfaction(weighted_avg_overall):
        # Map the 1–5 weighted average onto a qualitative rating.
        if weighted_avg_overall > 4:
            return "Outstanding"
        elif weighted_avg_overall > 3:
            return "Above Average"
        elif weighted_avg_overall > 2:
            return "Average"
        elif weighted_avg_overall > 1:
            return "Below Average"
        else:
            return "Unsatisfactory"

    def highlight_cell(col, col_label, row_label):
        # Styler callback: highlight one cell (col_label, row_label) green.
        if col.name == col_label:
            mask = (col.index == row_label)
            return ["background-color: lightgreen" if val_bool else "" for val_bool in mask]
        return np.full_like(col, "", dtype="str")

    # DataFrame.append was removed in pandas 2.0 — use concat for summary rows.
    summary_rows = pd.DataFrame([
        {
            "class_id": "",
            "class_info": "Summary",
            "num_respondents": num_respondents,
            "avg_overall": " ",
            "weighted_avg_overall": overall_weighted_avg_overall,
        },
        {
            "class_id": "",
            "class_info": "Weighted Avg.",
            "num_respondents": " ",
            "avg_overall": calculate_satisfaction(weighted_avg_overall),
            "weighted_avg_overall": weighted_avg_overall,
        },
    ])
    avg_scores_per_class = pd.concat([avg_scores_per_class, summary_rows], ignore_index=True)

    # NOTE(review): row_label=9 assumes the satisfaction cell lands at index 9;
    # verify for tables with a different number of classes.
    avg_scores_per_class = avg_scores_per_class.style.apply(
        highlight_cell, col_label="avg_overall", row_label=9
    )

    st.write(avg_scores_per_class)
    st.write(f"Number of respondents: {num_respondents}")
    st.write(f"Overall weighted avg.: {overall_weighted_avg_overall}")
    st.write(f"Weighted avg overall: {weighted_avg_overall}")

    # One expander per subject with its own chart and LLM recommendation.
    for subject in subject_avg_scores["sub name"]:
        with st.expander(subject):
            subject_filtered_data = filtered_data[filtered_data["sub name"] == subject]
            promt_txt = ""
            fig = go.Figure()
            for criterion, label in zip(column_names, criteria_labels):
                text = round(subject_filtered_data[criterion].mean(), 2)
                fig.add_trace(go.Bar(x=[label], y=[text], text=text, name=label))
                promt_txt += criterion.replace("_", " ") + ": " + str(text) + "\n"

            total_average = subject_filtered_data[column_names].mean(axis=1).mean()
            fig.update_layout(
                barmode="group",
                width=1000,
                title=f"{subject} Average Score: {round(total_average, 2)}/5",
                yaxis_title="Average Score",
            )
            st.plotly_chart(fig)

            prompt = (
                f"generate a very short insights about this faculty evaluation result "
                f"for the subject {subject}?\n{promt_txt}\n"
                f"please strictly shorten your response in sentence format"
            )
            _llm_insight(prompt, "Generating Recommendation")

    return selected_instructor, selected_instructor_comments, results_to_prompt


def evaluation():
    """Current page entry point: fetch data, run analytics, sentiment expander."""
    try:
        evaluation_df = fetch_evaluation_data()
        if evaluation_df is not None:
            analyze_instructors_results = analyze_instructors(evaluation_df)
            with st.expander("Sentiment Analysis"):
                eval_analysis(
                    analyze_instructors_results[0],
                    analyze_instructors_results[1],
                    analyze_instructors_results[2],
                )
    except Exception as e:
        st.error(f"An error occurred: {str(e)}")


# --- app5_selectbox/evaluation_analysis copy 2.py ----------------------------
import gspread
import pandas as pd
from oauth2client.service_account import ServiceAccountCredentials
from transformers import BertForSequenceClassification, BertTokenizer
import torch
import streamlit as st
from matplotlib import pyplot as plt
import numpy as np
from wordcloud import WordCloud
+from PIL import ImageFont +# from app5_selectbox.langchain_llama_gpu import llm_chain +from app5_selectbox.g4f_prompt import g4f_prompt + +# # Load the model and tokenizer +# model = BertForSequenceClassification.from_pretrained("./sentiment_model") +# tokenizer = BertTokenizer.from_pretrained("./sentiment_model") + +def eval_analysis(Instructor, Instructor_comment, criteria_results): + # # Authenticate with Google Sheets API + # scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"] + # creds = ServiceAccountCredentials.from_json_keyfile_name('dataset-401003-7325e98039a4.json', scope) + # client = gspread.authorize(creds) + + # # Open the spreadsheet by its title + # spreadsheet = client.open('survey (Responses)') + + # # Select a specific worksheet + # worksheet = spreadsheet.worksheet('Form Responses 1') + + # # Read data from the worksheet + # data = worksheet.get_all_values() + + # # Create a Pandas DataFrame from the data + # df = pd.DataFrame(data[1:], columns=data[0]) # Assuming the first row contains column headers + # df = df.iloc[:, [1, 2]] # Filter columns + + # # + # instructor_list = df.iloc[:, 0].unique() + # instructor_list = sorted(instructor_list) + # # print(instructor_list) + + # # Create a dropdown widget in the sidebar + # option = st.sidebar.selectbox("Select an option", instructor_list) + + # # Filter rows containing "Instructor 1" + # Instructor = df[df['Instructor'] == option] + # Instructor_comment = Instructor['comment'].tolist() + # ##################################################### BERT MODEL + # def perform_sentiment_analysis(text): + # inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True) + # with torch.no_grad(): + # outputs = model(**inputs) + # logits = outputs.logits + # predicted_class = torch.argmax(logits, dim=1).item() + # sentiment_labels = ["negative", "neutral", "positive"] + # sentiment = sentiment_labels[predicted_class] + # return sentiment + + + # from 
transformers import BertForSequenceClassification, BertTokenizer + + # # Load the model and tokenizer + # model = BertForSequenceClassification.from_pretrained("./sentiment_model") + # tokenizer = BertTokenizer.from_pretrained("./sentiment_model") + + # # sample_texts_tfidf = vectorizer.transform(sample_texts) + # # sample_predictions = classifier.predict(sample_texts_tfidf) + + # sample_predictions = [] + + # # Initialize counters for sentiment classes + # negative_count = 0 + # neutral_count = 0 + # positive_count = 0 + + + # for text in Instructor_comment: + # predicted_class = perform_sentiment_analysis(text) + # print(f"Text: {text}") + # print(f"Predicted Sentiment: {predicted_class}") + # sample_predictions.append(predicted_class) + # if predicted_class == "negative": + # negative_count += 1 + # elif predicted_class == "neutral": + # neutral_count += 1 + # else: + # positive_count += 1 + + # print(f'negative_count {negative_count}') + # print(f'neutral_count {neutral_count}') + # print(f'positive_count {positive_count}') + + ################################################### scikit learn model + + # import joblib + # # Load the model and vectorizer for predictions + # loaded_model, loaded_vectorizer = joblib.load("MultinomialNB_Sentiment.pkl") + + # # Transform the new text data using the loaded vectorizer + # new_text_features = loaded_vectorizer.transform(Instructor_comment) + + # # Make predictions using the loaded model + # predicted_class = loaded_model.predict(new_text_features) + # # print(f"Predicted class: {predicted_class}") + + # sample_predictions = [] + + # # Initialize counters for sentiment classes + # negative_count = 0 + # neutral_count = 0 + # positive_count = 0 + + + # for text, prediction in zip(Instructor_comment, predicted_class): + # print(f"Text: {text}") + # print(f"Predicted Sentiment: {prediction}") + # sample_predictions.append(prediction) + # if prediction == "negative": + # negative_count += 1 + # elif prediction == "neutral": 
+ # neutral_count += 1 + # else: + # positive_count += 1 + + # print(f'negative_count {negative_count}') + # print(f'neutral_count {neutral_count}') + # print(f'positive_count {positive_count}') + + ################################################### bert2 model + import torch + from transformers import BertTokenizer, BertForSequenceClassification + import numpy as np + + # Load the saved model + loaded_model = BertForSequenceClassification.from_pretrained('sentiment_model') + tokenizerr = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) + + + # Encode the sample comments + sample_encodings = tokenizerr(list(Instructor_comment), truncation=True, padding=True, max_length=128, return_tensors='pt') + + # Make predictions on the sample comments + sample_input_ids = sample_encodings['input_ids'] + sample_attention_mask = sample_encodings['attention_mask'] + + with torch.no_grad(): + sample_outputs = loaded_model(sample_input_ids, attention_mask=sample_attention_mask) + + # Get predicted labels + sample_logits = sample_outputs.logits + sample_predictions = np.argmax(sample_logits, axis=1) + + # Map predicted labels back to sentiment labels + sentiment_labels = ['negative', 'positive'] + predicted_sentiments = [sentiment_labels[label] for label in sample_predictions] + + + # # Print the comments and predicted sentiments + # for comment, sentiment in zip(Instructor_comment, predicted_sentiments): + # print(f"Comment: {comment}") + # print(f"Predicted Sentiment: {sentiment}") + # print() + + sample_predictions = [] + + # Initialize counters for sentiment classes + negative_count = 0 + neutral_count = 0 + positive_count = 0 + + # print(predicted_sentiments) + # print(Instructor_comment) + + for text, prediction in zip(Instructor_comment, predicted_sentiments): + print(f"Text: {text}") + print(f"Predicted Sentiment: {prediction}") + sample_predictions.append(prediction) + if prediction == "negative": + negative_count += 1 + elif prediction == "neutral": 
+ neutral_count += 1 + else: + positive_count += 1 + + print(f'negative_count {negative_count}') + print(f'neutral_count {neutral_count}') + print(f'positive_count {positive_count}') + + ################################################### + + # Create a Streamlit app + st.title("Sentiment Analysis Dashboard") + st.sidebar.header("Settings") + + link_text = "Instructor Survey" + link_url = "https://forms.gle/64n9CXMDRP2NYgZYA" + st.sidebar.markdown(f"[{link_text}]({link_url})") + + + # Display sentiment counts + st.write("### Sentiment Counts") + st.write(f"Negative: {negative_count}") + # st.write(f"Neutral: {neutral_count}") + st.write(f"Positive: {positive_count}") + + # Plot sentiment distribution + sentiment_counts = pd.Series(np.array(sample_predictions)).value_counts() + desired_order = ['positive', + # 'neutral', + 'negative'] + sentiment_counts = sentiment_counts.reindex(desired_order, fill_value=0) + percentage_distribution = sentiment_counts / len(sample_predictions) * 100 + + st.write("### Sentiment Distribution") + fig, ax = plt.subplots(figsize=(8, 6)) + bars = plt.bar(percentage_distribution.index, sentiment_counts.values, color=['green', 'orange', 'red']) + plt.xlabel('Sentiment') + plt.ylabel('Count') + plt.title('Sentiment Distribution in Sample Predictions') + plt.xticks(rotation=45) + for bar, percentage, des_order in zip(bars, percentage_distribution, desired_order): + height = bar.get_height() + ax.text(bar.get_x() + bar.get_width() / 2, height, f'{percentage:.2f}% {des_order.upper()}', ha='center', va='bottom') + st.pyplot(fig) + + st.set_option('deprecation.showPyplotGlobalUse', False) + + # Generate word clouds based on sentiment categories + sentiment_texts = { + 'positive': [], + # 'neutral': [], + 'negative': [] + } + + for text, sentiment in zip(Instructor_comment, sample_predictions): + sentiment_texts[sentiment].append(text) + + text_for_llama = "" + + for sentiment, texts in sentiment_texts.items(): + combined_texts = ' '.join(texts) 
+ combined_texts = combined_texts.split() + filtered_words = [word for word in combined_texts if len(word) > 2] + combined_texts = ' '.join(filtered_words) + if combined_texts =="": continue + # Load your custom TrueType font using PIL + font_path = "QuartzoBold-W9lv.ttf" # Replace with the path to your TTF font file + # custom_font = ImageFont.truetyp e(font_path) # Adjust the font size as needed + # Set the font family to use the TrueType font + # font = ImageFont.truetype(font_path) + + wordcloud = WordCloud(font_path=font_path,width=800, height=600, background_color='white', max_words=15).generate(combined_texts) + st.write(f"### Word Cloud for {sentiment} Sentiment") + plt.figure(figsize=(10, 6)) + plt.imshow(wordcloud, interpolation='bilinear') + plt.axis('off') + st.pyplot() + + if sentiment == "negative": + # Extract the text from the word cloud object + generated_text = wordcloud.words_ + + # Print the generated text + for word, frequency in generated_text.items(): + # print(f"{word}: {frequency}") + text_for_llama += str(word)+" " + + + # Generate a word cloud from all the text data + all_text = ' '.join(Instructor_comment) + all_text = all_text.split() + filtered_words = [word for word in all_text if len(word) > 3] + all_text = ' '.join(filtered_words) + + st.write("### Word Cloud for All Sentiments") + wordcloud = WordCloud(font_path=font_path, width=800, height=800, background_color='white', max_words=200).generate(all_text) + plt.figure(figsize=(8, 8), facecolor=None) + plt.imshow(wordcloud) + plt.axis("off") + st.pyplot() + + neg_comments = [] + pos_comments = [] + # Print the comments and predicted sentiments + for comment, sentiment in zip(Instructor_comment, predicted_sentiments): + if sentiment == "positive": pos_comments.append(comment) + else: neg_comments.append(comment) + + + if text_for_llama == "": + st.title("Expressing Gratitude and Dedication") + text_for_llama = f""" + There's no negative feedback/comments to the instructor, give 
him/her short email to say. + [Your Name] = The Management + [Instructor's Name] = {Instructor} + """ + else: + st.title('Recommendation:') + text_for_llama = text_for_llama.split() + text_for_llama = ", ".join(text_for_llama) + text_for_llama = f""" + Based from these students' feedback: {str(text_for_llama)}. \n + Please generate a short email to teh instructor having 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only. + define the best email subject based from the recomendation + [Your Name] = The Management + [Instructor's Name] = {Instructor} + + """ + + # text_for_llama = f""" + # Based from these students' feedback: {str(text_for_llama)}. \n + # Please generate a short 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only. + + # """ + + # text_for_llama = f""" + # Based from these students' feedback: {str(text_for_llama)}. \n + # and Overall score per criteria results: {str(criteria_results)}. \n + # Please generate a short 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only. + # """ + # Then give insights about the evaluation report based from different criteria. + # Here is the results: {criteria_results} + # Your response format- + # Recommendation to Instructor: + # Insights on Evaluation Report: + + + + prompt = text_for_llama + + + + # # ================================================ replicate.com + # CUDA_LAUNCH_BLOCKING=1 + # import replicate + # replicate = replicate.Client(api_token='r8_M9Dx8VYKkuTcw1o39d4Yw0HtpWFt4k239ebvW') + # output = replicate.run( + # # "meta/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1", + # "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3", + # input={"prompt": prompt} + # ) + # st.write(output) + # # The meta/llama-2-70b-chat model can stream output as it's running. 
+ # # The predict method returns an iterator, and you can iterate over that output. + # # ================================================ + + + # # st.title('Recommendation:') + # # llama_output = "" + # # with st.spinner("Generating Recommendation"): + # # loading_text = st.empty() + # # for item in reponse(prompt): + # # llama_output +=item + # # loading_text.write(llama_output) + # # st.success("Generation Complete!") + + # # ================================================ local llama llm_chain + while True: + try: + with st.spinner("Generating...."): + # st.write(llm_chain.run(prompt)) + # st.write(g4f_prompt(prompt)) ################# + st.success("Generation Complete!") + break + + except Exception as e: + # Handle the error (e.g., log it or take appropriate action) + # Sleep for a moment before retrying + # st.write("Error occurred.. Retrying") + pass + # time.sleep(0.4) + # # ================================================ + + + + + + diff --git a/app5_selectbox/evaluation_analysis copy.py b/app5_selectbox/evaluation_analysis copy.py new file mode 100644 index 0000000000000000000000000000000000000000..c7cbf0aa42ea9e6e4cfb5885c0d65e026650a3f0 --- /dev/null +++ b/app5_selectbox/evaluation_analysis copy.py @@ -0,0 +1,330 @@ +import gspread +import pandas as pd +from oauth2client.service_account import ServiceAccountCredentials +from transformers import BertForSequenceClassification, BertTokenizer +import torch +import streamlit as st +from matplotlib import pyplot as plt +import numpy as np +from wordcloud import WordCloud +from PIL import ImageFont +from app5_selectbox.langchain_llama_gpu import llm_chain + +# # Load the model and tokenizer +# model = BertForSequenceClassification.from_pretrained("./sentiment_model") +# tokenizer = BertTokenizer.from_pretrained("./sentiment_model") + +def eval_analysis(Instructor, Instructor_comment, criteria_results): + # # Authenticate with Google Sheets API + # scope = ["https://spreadsheets.google.com/feeds", 
"https://www.googleapis.com/auth/drive"] + # creds = ServiceAccountCredentials.from_json_keyfile_name('dataset-401003-7325e98039a4.json', scope) + # client = gspread.authorize(creds) + + # # Open the spreadsheet by its title + # spreadsheet = client.open('survey (Responses)') + + # # Select a specific worksheet + # worksheet = spreadsheet.worksheet('Form Responses 1') + + # # Read data from the worksheet + # data = worksheet.get_all_values() + + # # Create a Pandas DataFrame from the data + # df = pd.DataFrame(data[1:], columns=data[0]) # Assuming the first row contains column headers + # df = df.iloc[:, [1, 2]] # Filter columns + + # # + # instructor_list = df.iloc[:, 0].unique() + # instructor_list = sorted(instructor_list) + # # print(instructor_list) + + # # Create a dropdown widget in the sidebar + # option = st.sidebar.selectbox("Select an option", instructor_list) + + # # Filter rows containing "Instructor 1" + # Instructor = df[df['Instructor'] == option] + # Instructor_comment = Instructor['comment'].tolist() + # ##################################################### BERT MODEL + # def perform_sentiment_analysis(text): + # inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True) + # with torch.no_grad(): + # outputs = model(**inputs) + # logits = outputs.logits + # predicted_class = torch.argmax(logits, dim=1).item() + # sentiment_labels = ["negative", "neutral", "positive"] + # sentiment = sentiment_labels[predicted_class] + # return sentiment + + + # from transformers import BertForSequenceClassification, BertTokenizer + + # # Load the model and tokenizer + # model = BertForSequenceClassification.from_pretrained("./sentiment_model") + # tokenizer = BertTokenizer.from_pretrained("./sentiment_model") + + # # sample_texts_tfidf = vectorizer.transform(sample_texts) + # # sample_predictions = classifier.predict(sample_texts_tfidf) + + # sample_predictions = [] + + # # Initialize counters for sentiment classes + # negative_count = 0 + # 
neutral_count = 0 + # positive_count = 0 + + + # for text in Instructor_comment: + # predicted_class = perform_sentiment_analysis(text) + # print(f"Text: {text}") + # print(f"Predicted Sentiment: {predicted_class}") + # sample_predictions.append(predicted_class) + # if predicted_class == "negative": + # negative_count += 1 + # elif predicted_class == "neutral": + # neutral_count += 1 + # else: + # positive_count += 1 + + # print(f'negative_count {negative_count}') + # print(f'neutral_count {neutral_count}') + # print(f'positive_count {positive_count}') + + ################################################### scikit learn model + + # import joblib + # # Load the model and vectorizer for predictions + # loaded_model, loaded_vectorizer = joblib.load("MultinomialNB_Sentiment.pkl") + + # # Transform the new text data using the loaded vectorizer + # new_text_features = loaded_vectorizer.transform(Instructor_comment) + + # # Make predictions using the loaded model + # predicted_class = loaded_model.predict(new_text_features) + # # print(f"Predicted class: {predicted_class}") + + # sample_predictions = [] + + # # Initialize counters for sentiment classes + # negative_count = 0 + # neutral_count = 0 + # positive_count = 0 + + + # for text, prediction in zip(Instructor_comment, predicted_class): + # print(f"Text: {text}") + # print(f"Predicted Sentiment: {prediction}") + # sample_predictions.append(prediction) + # if prediction == "negative": + # negative_count += 1 + # elif prediction == "neutral": + # neutral_count += 1 + # else: + # positive_count += 1 + + # print(f'negative_count {negative_count}') + # print(f'neutral_count {neutral_count}') + # print(f'positive_count {positive_count}') + + ################################################### bert2 model + import torch + from transformers import BertTokenizer, BertForSequenceClassification + import numpy as np + + # Load the saved model + loaded_model = BertForSequenceClassification.from_pretrained('sentiment_model') + 
tokenizerr = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) + + + # Encode the sample comments + sample_encodings = tokenizerr(list(Instructor_comment), truncation=True, padding=True, max_length=128, return_tensors='pt') + + # Make predictions on the sample comments + sample_input_ids = sample_encodings['input_ids'] + sample_attention_mask = sample_encodings['attention_mask'] + + with torch.no_grad(): + sample_outputs = loaded_model(sample_input_ids, attention_mask=sample_attention_mask) + + # Get predicted labels + sample_logits = sample_outputs.logits + sample_predictions = np.argmax(sample_logits, axis=1) + + # Map predicted labels back to sentiment labels + sentiment_labels = ['negative', 'positive'] + predicted_sentiments = [sentiment_labels[label] for label in sample_predictions] + + # # Print the comments and predicted sentiments + # for comment, sentiment in zip(Instructor_comment, predicted_sentiments): + # print(f"Comment: {comment}") + # print(f"Predicted Sentiment: {sentiment}") + # print() + + sample_predictions = [] + + # Initialize counters for sentiment classes + negative_count = 0 + neutral_count = 0 + positive_count = 0 + + + for text, prediction in zip(Instructor_comment, predicted_sentiments): + print(f"Text: {text}") + print(f"Predicted Sentiment: {prediction}") + sample_predictions.append(prediction) + if prediction == "negative": + negative_count += 1 + elif prediction == "neutral": + neutral_count += 1 + else: + positive_count += 1 + + print(f'negative_count {negative_count}') + print(f'neutral_count {neutral_count}') + print(f'positive_count {positive_count}') + + ################################################### + + # Create a Streamlit app + st.title("Sentiment Analysis Dashboard") + st.sidebar.header("Settings") + + link_text = "Instructor Survey" + link_url = "https://forms.gle/64n9CXMDRP2NYgZYA" + st.sidebar.markdown(f"[{link_text}]({link_url})") + + + # Display sentiment counts + st.write("### Sentiment 
Counts") + st.write(f"Negative: {negative_count}") + # st.write(f"Neutral: {neutral_count}") + st.write(f"Positive: {positive_count}") + + # Plot sentiment distribution + sentiment_counts = pd.Series(np.array(sample_predictions)).value_counts() + desired_order = ['positive', + # 'neutral', + 'negative'] + sentiment_counts = sentiment_counts.reindex(desired_order, fill_value=0) + percentage_distribution = sentiment_counts / len(sample_predictions) * 100 + + st.write("### Sentiment Distribution") + fig, ax = plt.subplots(figsize=(8, 6)) + bars = plt.bar(percentage_distribution.index, sentiment_counts.values, color=['green', 'orange', 'red']) + plt.xlabel('Sentiment') + plt.ylabel('Count') + plt.title('Sentiment Distribution in Sample Predictions') + plt.xticks(rotation=45) + for bar, percentage, des_order in zip(bars, percentage_distribution, desired_order): + height = bar.get_height() + ax.text(bar.get_x() + bar.get_width() / 2, height, f'{percentage:.2f}% {des_order.upper()}', ha='center', va='bottom') + st.pyplot(fig) + + st.set_option('deprecation.showPyplotGlobalUse', False) + + # Generate word clouds based on sentiment categories + sentiment_texts = { + 'positive': [], + # 'neutral': [], + 'negative': [] + } + + for text, sentiment in zip(Instructor_comment, sample_predictions): + sentiment_texts[sentiment].append(text) + + text_for_llama = "" + + for sentiment, texts in sentiment_texts.items(): + combined_texts = ' '.join(texts) + combined_texts = combined_texts.split() + filtered_words = [word for word in combined_texts if len(word) > 2] + combined_texts = ' '.join(filtered_words) + if combined_texts =="": continue + # Load your custom TrueType font using PIL + font_path = "QuartzoBold-W9lv.ttf" # Replace with the path to your TTF font file + # custom_font = ImageFont.truetyp e(font_path) # Adjust the font size as needed + # Set the font family to use the TrueType font + # font = ImageFont.truetype(font_path) + + wordcloud = 
WordCloud(font_path=font_path,width=800, height=600, background_color='white', max_words=15).generate(combined_texts) + st.write(f"### Word Cloud for {sentiment} Sentiment") + plt.figure(figsize=(10, 6)) + plt.imshow(wordcloud, interpolation='bilinear') + plt.axis('off') + st.pyplot() + + if sentiment == "negative": + # Extract the text from the word cloud object + generated_text = wordcloud.words_ + + # Print the generated text + for word, frequency in generated_text.items(): + # print(f"{word}: {frequency}") + text_for_llama += str(word)+" " + + + # Generate a word cloud from all the text data + all_text = ' '.join(Instructor_comment) + all_text = all_text.split() + filtered_words = [word for word in all_text if len(word) > 3] + all_text = ' '.join(filtered_words) + + st.write("### Word Cloud for All Sentiments") + wordcloud = WordCloud(font_path=font_path, width=800, height=800, background_color='white', max_words=200).generate(all_text) + plt.figure(figsize=(8, 8), facecolor=None) + plt.imshow(wordcloud) + plt.axis("off") + st.pyplot() + + neg_comments = [] + pos_comments = [] + # Print the comments and predicted sentiments + for comment, sentiment in zip(Instructor_comment, predicted_sentiments): + if sentiment == "positive": pos_comments.append(comment) + else: neg_comments.append(comment) + + text_for_llama = text_for_llama.split() + text_for_llama = ", ".join(text_for_llama) + text_for_llama = f""" + Based from these students' feedback: {str(text_for_llama)}. \n + Please generate a recommendation to the instructor. Make it in sentence type and in English language only. + Then give insights about the evaluation report based from different criteria. 
+ Here is the results: {criteria_results} + Your response format- + Recommendation to Instructor: + Insights on Evaluation Report: + + """ + + prompt = text_for_llama + # # ================================================ replicate.com + # CUDA_LAUNCH_BLOCKING=1 + # import replicate + # replicate = replicate.Client(api_token='r8_M9Dx8VYKkuTcw1o39d4Yw0HtpWFt4k239ebvW') + # output = replicate.run( + # # "meta/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1", + # "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3", + # input={"prompt": prompt} + # ) + # # The meta/llama-2-70b-chat model can stream output as it's running. + # # The predict method returns an iterator, and you can iterate over that output. + # # ================================================ + + + # st.title('Recommendation:') + # llama_output = "" + # with st.spinner("Generating Recommendation"): + # loading_text = st.empty() + # for item in reponse(prompt): + # llama_output +=item + # loading_text.write(llama_output) + # st.success("Generation Complete!") + + st.title('Recommendation:') + llama_output = "" + with st.spinner("Generating Recommendation"): + st.write(llm_chain.run(prompt)) + st.success("Generation Complete!") + + + + diff --git a/app5_selectbox/evaluation_analysis.py b/app5_selectbox/evaluation_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..82a3a4fa0ec1673da6d31173b5d3a40324c93a78 --- /dev/null +++ b/app5_selectbox/evaluation_analysis.py @@ -0,0 +1,347 @@ +import gspread +import pandas as pd +from oauth2client.service_account import ServiceAccountCredentials +from transformers import AutoTokenizer, AutoModelForSequenceClassification +import torch +import re +import streamlit as st +import hydralit_components as hc +from matplotlib import pyplot as plt +import numpy as np +from wordcloud import WordCloud +import plotly.graph_objs as go +import plotly.express as px +import 
plotly.figure_factory as ff +from PIL import ImageFont +# from app5_selectbox.langchain_llama_gpu import llm_chain +from app5_selectbox.g4f_prompt import g4f_prompt +# from app5_selectbox.llama2_prompt import llama_prompt +from app5_selectbox.naive_bayes_cl import nb_clf + +from HF_inference import analyze_sintement + +models = ['BERT-BASE MODEL', 'BERT-LARGE MODEL', 'DISTILIBERT MODEL', 'NAIVE BAYES MODEL'] + + +# old path +# model_list = [ +# r"/home/aibo/prototype_v1/BERT_BASE/bert_sentiment_model", +# r"/home/aibo/prototype_v1/BERT_LARGE/bert_sentiment_model", +# r"/home/aibo/prototype_v1/DISTILIBERT/bert_sentiment_model" +# ] + +# new path +model_list = [ + r"/home/aibo/prototype_v1/HF_MODELS/HUB/stud-fac-eval-bert-base-uncased", + r"/home/aibo/prototype_v1/HF_MODELS/HUB/stud-fac-eval-bert-large-uncased", + r"/home/aibo/prototype_v1/HF_MODELS/HUB/stud-fac-eval-distilbert-base-uncased", +] + +model_tokenizer_list = ['bert-base-uncased', 'bert-large-uncased', 'distilbert-base-uncased'] + +selected_model = 0 +llama2_g4f = False # true == llama2 + +# if 'chkbx_selected_model' in st.session_state: +# st.write("selected model: ",models.index(st.session_state.chkbx_selected_model)) +# if 'chkbx_selected_model' not in st.session_state: +# st.write("no selected!") + + +def clean_text(text_list): + cleaned_samples = [] + for text_sample in text_list: + # Case folding and normalization + cleaned_text = str(text_sample).lower() + + # Removing non-alphabetic characters + cleaned_text = re.sub(r'[^a-zA-Z\s]', '', cleaned_text) + + cleaned_samples.append(cleaned_text) + return cleaned_samples + + +# local model +def classify_sentiments(text_samples, tokenizer, model): + instructor_comments = [] + predicted_sentiments = [] + predicted_sentiments_scores = [] + + # Iterate through the text samples and classify the sentiment + for idx, text_sample in enumerate(text_samples): + # Tokenize the text sample + inputs = tokenizer(text_sample, return_tensors="pt") + + # Perform 
sentiment classification + outputs = model(**inputs) + + # Get the predicted sentiment (positive/negative) + predicted_class = torch.argmax(outputs.logits, dim=1).item() + + # Get the probabilities for each class + probabilities = torch.softmax(outputs.logits, dim=1).tolist()[0] + + # Store results + instructor_comments.append(text_sample) + predicted_sentiments.append("positive" if predicted_class == 1 else "negative") + predicted_sentiments_scores.append({"positive": probabilities[1]*100, "negative": probabilities[0]*100}) + return instructor_comments, predicted_sentiments, predicted_sentiments_scores + + +# # inference +# def classify_sentiments(text_samples, model): +# instructor_comments = [] +# predicted_sentiments = [] +# predicted_sentiments_scores = [] + +# # text = ["i love this", "nice one!", "happy!"] +# selected_model = model +# results = [analyze_sintement(t, selected_model) for t in text_samples] + + + +# for idx, result in enumerate(results): +# # st.text(result[0]) +# # predicted_class, probabilities = analyze_sintement(text_sample, model) +# # Store results +# instructor_comments.append(text_samples[idx]) +# predicted_sentiments.append("positive" if result[0] == "LABEL_1" else "negative") +# predicted_sentiments_scores.append({"positive": result[1]*100, "negative": 100-(result[1]*100)}) + +# # st.write(instructor_comments) +# return instructor_comments, predicted_sentiments, predicted_sentiments_scores + + + +def calculate_average_scores(probability_list): + total_comments = len(probability_list) + total_positive = 0 + total_negative = 0 + + for prob_dict in probability_list: + total_positive += prob_dict['positive'] + total_negative += prob_dict['negative'] + + average_positive = total_positive / total_comments + average_negative = total_negative / total_comments + return average_positive, average_negative + +def eval_analysis(instructor, instructor_comment, criteria_results, selected_model): + if selected_model < 3: + model = 
model_list[selected_model] + # model_tokenizer = model_tokenizer_list[selected_model] + model_tokenizer = model_list[selected_model] + loaded_model = AutoModelForSequenceClassification.from_pretrained(model) + tokenizer = AutoTokenizer.from_pretrained(model_tokenizer) + + clean_instructor_comment = clean_text(instructor_comment) + + predicted_sentiments_transformer = classify_sentiments(clean_instructor_comment, tokenizer, loaded_model) # local model + # predicted_sentiments_transformer = classify_sentiments(clean_instructor_comment, models[selected_model]) # inference + + predicted_sentiments = predicted_sentiments_transformer[1] + scores = predicted_sentiments_transformer[2] + + + elif selected_model == 3: + try: + instructor_comment, predicted_sentiments, scores = nb_clf(instructor_comment) + # scores = scores[1] + except Exception as e: + st.exception(e) + else: pass + + sample_predictions = [] + comments_data = [] + negative_count = 0 + neutral_count = 0 + positive_count = 0 + # average_sintement_score = np.average(scores['positive']) + + average_positive, average_negative = calculate_average_scores(scores) + + # st.text(calculate_average_scores(scores)) + for text, prediction, score in zip(instructor_comment, predicted_sentiments, scores): + sample_predictions.append(prediction) + comments_data.append((text, prediction, score['positive'])) + if prediction == "negative": + negative_count += 1 + elif prediction == "neutral": + neutral_count += 1 + else: + positive_count += 1 + + sentiment_texts = { + 'positive': [], + 'negative': [] + } + + for text, sentiment in zip(instructor_comment, sample_predictions): + sentiment_texts[sentiment].append(text) + + text_for_llama = "" + + + def sentiment_tbl(): + # Create DataFrame + comments_df = pd.DataFrame(instructor_comment, columns=["Comments"]) + + # Drop index + comments_df_display = comments_df.copy() + comments_df_display.reset_index(drop=True, inplace=True) + + # Create DataFrame + comments_data_df = 
pd.DataFrame(comments_data, columns=["Comments", "Sentiment", "Score"]) + # Define a function to apply row-wise styling + def highlight_row(row): + if row["Sentiment"] == "positive": + return ['background-color: lightgreen'] * len(row) + elif row["Sentiment"] == "negative": + return ['background-color: lightcoral'] * len(row) + else: + return [''] * len(row) + + # Set index to start at 1 + comments_data_df.index += 1 + + # Apply styling + styled_df = comments_data_df.style.apply(highlight_row, axis=1) + + # Display styled DataFrame + st.table(styled_df) + + + theme_bad = {'bgcolor': '#FFF0F0','title_color': 'red','content_color': 'red','icon_color': 'red', 'icon': 'fa fa-times-circle'} + theme_good = {'bgcolor': '#EFF8F7','title_color': 'green','content_color': 'green','icon_color': 'green', 'icon': 'fa fa-check-circle'} + + st.write(f"### SENTIMENTS/RECOMENDATION INSIGHTS") + with st.expander("Sentiment Analysis"): + st.title("Sentiment Analysis Dashboard") + st.write(f"## Using {models[selected_model]}") + st.write("### Sentiment Rating") + + cc = st.columns(2) + with cc[0]: + # can just use 'good', 'bad', 'neutral' sentiment to auto color the card + hc.info_card(title='Positive', content=str(round(average_positive,6))+ '%', sentiment='good', bar_value=round(average_positive,6)) + with cc[1]: + hc.info_card(title='Negative', content=str(round(average_negative,6))+ '%', sentiment='bad', bar_value=round(average_negative,6)) + + # st.write(f"#### Positive: {positive_count} - {round(average_positive,6)} %") + # st.write(f"#### Negative: {negative_count} - {round(average_negative,6)} %") + + # st.write("### Sentiment Rating") + # st.write(f"#### Positive: {round(average_positive*100,2)} %") + # st.write(f"#### Negative: {round(average_negative*100,2)} %") + + + sentiment_counts = pd.Series(sample_predictions).value_counts() + desired_order = ['positive', 'negative'] + sentiment_counts = sentiment_counts.reindex(desired_order, fill_value=0) + percentage_distribution = 
sentiment_counts / len(sample_predictions) * 100 + + sentiment_tbl() + + st.write("### Sentiment Distribution") + + fig = go.Figure(layout=dict( + autosize=True, # Set autosize to True for automatic adjustment + )) + fig.add_trace(go.Bar( + x=percentage_distribution.index, + y=sentiment_counts.values, + marker_color=['green', 'red'], + text=[f'{percentage:.2f}% {des_order.upper()}' for percentage, des_order in zip(percentage_distribution, desired_order)], + textposition='auto' + )) + + fig.update_layout( + width=600, + height=500, + xaxis=dict(title='Sentiment', tickangle=45), + yaxis=dict(title='Count'), + title='Sentiment Distribution in Sample Predictions', + ) + + st.plotly_chart(fig) + + for sentiment, texts in sentiment_texts.items(): + combined_texts = ' '.join(texts) + combined_texts = combined_texts.split() + filtered_words = [word for word in combined_texts if len(word) > 2] + combined_texts = ' '.join(filtered_words) + + if combined_texts == "": + continue + + font_path = "/home/aibo/prototype_v1/prototype/app5_selectbox/QuartzoBold-W9lv.ttf" + wordcloud = WordCloud(font_path=font_path, width=800, height=600, background_color='white', max_words=15, min_word_length=3, stopwords={}).generate(combined_texts) + + st.write(f"### Word Cloud for {sentiment.capitalize()} Sentiment") + + plt.figure(figsize=(10, 6)) + plt.imshow(wordcloud, interpolation='bilinear') + plt.axis("off") + wordcloud_fig = plt.gcf() + st.pyplot(wordcloud_fig) + + if sentiment == "negative": + text_for_llama = sentiment_texts[sentiment] + + + # Generate a word cloud from all the text data + all_text = ' '.join(instructor_comment) + all_text = all_text.split() + filtered_words = [word for word in all_text if len(word) > 2] + all_text = ' '.join(filtered_words) + st.write("### Word Cloud for All Sentiments") + wordcloud = WordCloud(font_path=font_path, width=800, height=800, background_color='white', max_words=200, min_word_length=3, stopwords={}).generate(all_text) + # Create a Matplotlib 
figure + plt.figure(figsize=(8, 8)) + plt.imshow(wordcloud, interpolation='bilinear') + plt.axis("off") + wordcloud_fig = plt.gcf() + st.pyplot(wordcloud_fig) + + if text_for_llama == "": + with st.expander("Expressing Gratitude and Dedication"): + st.title("Expressing Gratitude and Dedication") + text_for_llama = f""" + There's no negative feedback or comment for the instructor; give him or her a short letter to say. + [Your Name] = The Management + [Instructor's Name] = {instructor} + """ + prompt = text_for_llama + while True: + try: + with st.spinner("Generating...."): + if not llama2_g4f: st.write(g4f_prompt(prompt)) ################# + # else: st.write(llama_prompt(prompt)) ################# + st.success("Generation Complete!") + break + except Exception as e: + pass + else: + with st.expander("Recommendation"): + # st.title('Recommendation:') + # text_for_llama = text_for_llama.split() + text_for_llama = ", ".join(text_for_llama) + text_for_llama = f""" + Based on these students' feedback: {str(text_for_llama)}. \n + Please generate a short letter to the instructor with ten recommendations in bullet format. Make it in sentence type and English only. + Define the best letter's subject based on the recommendation. 
+ Subject is Recommendations for Effective Teaching + Sender's Name is 'The Management' + receiver's or Instructor's Name is {instructor} + + """ + prompt = text_for_llama + while True: + try: + with st.spinner("Generating...."): + if not llama2_g4f: st.write(g4f_prompt(prompt)) ################# + # else: st.write(llama_prompt(prompt)) ################# + st.success("Generation Complete!") + break + except Exception as e: + pass \ No newline at end of file diff --git a/app5_selectbox/evaluation_analysis_g4f.py b/app5_selectbox/evaluation_analysis_g4f.py new file mode 100644 index 0000000000000000000000000000000000000000..0dcb582c0c31a4fd55086181c02b4721deaf6ff2 --- /dev/null +++ b/app5_selectbox/evaluation_analysis_g4f.py @@ -0,0 +1,382 @@ +import gspread +import pandas as pd +from oauth2client.service_account import ServiceAccountCredentials +from transformers import BertForSequenceClassification, BertTokenizer +import torch +import streamlit as st +from matplotlib import pyplot as plt +import numpy as np +from wordcloud import WordCloud +from PIL import ImageFont +# from app5_selectbox.langchain_llama_gpu import llm_chain +from app5_selectbox.g4f_prompt import g4f_prompt +from app5_selectbox.df4_sentiment_analysis import sentiment_func + +# # Load the model and tokenizer +# model = BertForSequenceClassification.from_pretrained("./sentiment_model") +# tokenizer = BertTokenizer.from_pretrained("./sentiment_model") + +def eval_analysis(Instructor, Instructor_comment, criteria_results): + # # Authenticate with Google Sheets API + # scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"] + # creds = ServiceAccountCredentials.from_json_keyfile_name('dataset-401003-7325e98039a4.json', scope) + # client = gspread.authorize(creds) + + # # Open the spreadsheet by its title + # spreadsheet = client.open('survey (Responses)') + + # # Select a specific worksheet + # worksheet = spreadsheet.worksheet('Form Responses 1') + + # # Read data 
from the worksheet + # data = worksheet.get_all_values() + + # # Create a Pandas DataFrame from the data + # df = pd.DataFrame(data[1:], columns=data[0]) # Assuming the first row contains column headers + # df = df.iloc[:, [1, 2]] # Filter columns + + # # + # instructor_list = df.iloc[:, 0].unique() + # instructor_list = sorted(instructor_list) + # # print(instructor_list) + + # # Create a dropdown widget in the sidebar + # option = st.sidebar.selectbox("Select an option", instructor_list) + + # # Filter rows containing "Instructor 1" + # Instructor = df[df['Instructor'] == option] + # Instructor_comment = Instructor['comment'].tolist() + # ##################################################### BERT MODEL + # def perform_sentiment_analysis(text): + # inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True) + # with torch.no_grad(): + # outputs = model(**inputs) + # logits = outputs.logits + # predicted_class = torch.argmax(logits, dim=1).item() + # sentiment_labels = ["negative", "neutral", "positive"] + # sentiment = sentiment_labels[predicted_class] + # return sentiment + + + # from transformers import BertForSequenceClassification, BertTokenizer + + # # Load the model and tokenizer + # model = BertForSequenceClassification.from_pretrained("./sentiment_model") + # tokenizer = BertTokenizer.from_pretrained("./sentiment_model") + + # # sample_texts_tfidf = vectorizer.transform(sample_texts) + # # sample_predictions = classifier.predict(sample_texts_tfidf) + + # sample_predictions = [] + + # # Initialize counters for sentiment classes + # negative_count = 0 + # neutral_count = 0 + # positive_count = 0 + + + # for text in Instructor_comment: + # predicted_class = perform_sentiment_analysis(text) + # print(f"Text: {text}") + # print(f"Predicted Sentiment: {predicted_class}") + # sample_predictions.append(predicted_class) + # if predicted_class == "negative": + # negative_count += 1 + # elif predicted_class == "neutral": + # neutral_count += 1 + # 
else: + # positive_count += 1 + + # print(f'negative_count {negative_count}') + # print(f'neutral_count {neutral_count}') + # print(f'positive_count {positive_count}') + + ################################################### scikit learn model + + # import joblib + # # Load the model and vectorizer for predictions + # loaded_model, loaded_vectorizer = joblib.load("MultinomialNB_Sentiment.pkl") + + # # Transform the new text data using the loaded vectorizer + # new_text_features = loaded_vectorizer.transform(Instructor_comment) + + # # Make predictions using the loaded model + # predicted_class = loaded_model.predict(new_text_features) + # # print(f"Predicted class: {predicted_class}") + + # sample_predictions = [] + + # # Initialize counters for sentiment classes + # negative_count = 0 + # neutral_count = 0 + # positive_count = 0 + + + # for text, prediction in zip(Instructor_comment, predicted_class): + # print(f"Text: {text}") + # print(f"Predicted Sentiment: {prediction}") + # sample_predictions.append(prediction) + # if prediction == "negative": + # negative_count += 1 + # elif prediction == "neutral": + # neutral_count += 1 + # else: + # positive_count += 1 + + # print(f'negative_count {negative_count}') + # print(f'neutral_count {neutral_count}') + # print(f'positive_count {positive_count}') + + ################################################### bert2 model + import torch + from transformers import BertTokenizer, BertForSequenceClassification + import numpy as np + + # Load the saved model + loaded_model = BertForSequenceClassification.from_pretrained('sentiment_model') + tokenizerr = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) + + + # Encode the sample comments + sample_encodings = tokenizerr(list(Instructor_comment), truncation=True, padding=True, max_length=128, return_tensors='pt') + + # Make predictions on the sample comments + sample_input_ids = sample_encodings['input_ids'] + sample_attention_mask = 
sample_encodings['attention_mask'] + + with torch.no_grad(): + sample_outputs = loaded_model(sample_input_ids, attention_mask=sample_attention_mask) + + # Get predicted labels + sample_logits = sample_outputs.logits + sample_predictions = np.argmax(sample_logits, axis=1) + + + # Map predicted labels back to sentiment labels + sentiment_labels = ['negative', 'positive'] + # predicted_sentiments = [sentiment_labels[label] for label in sample_predictions] + + predicted_sentiments = sentiment_func(Instructor_comment) + print(predicted_sentiments) + + # # Print the comments and predicted sentiments + # for comment, sentiment in zip(Instructor_comment, predicted_sentiments): + # print(f"Comment: {comment}") + # print(f"Predicted Sentiment: {sentiment}") + # print() + + sample_predictions = [] + + # Initialize counters for sentiment classes + negative_count = 0 + neutral_count = 0 + positive_count = 0 + + # print(predicted_sentiments) + # print(Instructor_comment) + + for text, prediction in zip(Instructor_comment, predicted_sentiments): + print(f"Text: {text}") + print(f"Predicted Sentiment: {prediction}") + sample_predictions.append(prediction) + if prediction == "negative": + negative_count += 1 + elif prediction == "neutral": + neutral_count += 1 + else: + positive_count += 1 + + print(f'negative_count {negative_count}') + # print(f'neutral_count {neutral_count}') + print(f'positive_count {positive_count}') + + ################################################### + + # Create a Streamlit app + st.title("Sentiment Analysis Dashboard") + st.sidebar.header("Settings") + + link_text = "Instructor Survey" + link_url = "https://forms.gle/64n9CXMDRP2NYgZYA" + st.sidebar.markdown(f"[{link_text}]({link_url})") + + + # Display sentiment counts + st.write("### Sentiment Counts") + st.write(f"Negative: {negative_count}") + # st.write(f"Neutral: {neutral_count}") + st.write(f"Positive: {positive_count}") + + # Plot sentiment distribution + sentiment_counts = 
pd.Series(np.array(sample_predictions)).value_counts() + desired_order = ['positive', + # 'neutral', + 'negative'] + sentiment_counts = sentiment_counts.reindex(desired_order, fill_value=0) + percentage_distribution = sentiment_counts / len(sample_predictions) * 100 + + st.write("### Sentiment Distribution") + fig, ax = plt.subplots(figsize=(8, 6)) + bars = plt.bar(percentage_distribution.index, sentiment_counts.values, color=['green', 'orange', 'red']) + plt.xlabel('Sentiment') + plt.ylabel('Count') + plt.title('Sentiment Distribution in Sample Predictions') + plt.xticks(rotation=45) + for bar, percentage, des_order in zip(bars, percentage_distribution, desired_order): + height = bar.get_height() + ax.text(bar.get_x() + bar.get_width() / 2, height, f'{percentage:.2f}% {des_order.upper()}', ha='center', va='bottom') + st.pyplot(fig) + + st.set_option('deprecation.showPyplotGlobalUse', False) + + # Generate word clouds based on sentiment categories + sentiment_texts = { + 'positive': [], + # 'neutral': [], + 'negative': [] + } + + for text, sentiment in zip(Instructor_comment, sample_predictions): + sentiment_texts[sentiment].append(text) + + text_for_llama = "" + + for sentiment, texts in sentiment_texts.items(): + combined_texts = ' '.join(texts) + combined_texts = combined_texts.split() + filtered_words = [word for word in combined_texts if len(word) > 2] + combined_texts = ' '.join(filtered_words) + if combined_texts =="": continue + # Load your custom TrueType font using PIL + font_path = "QuartzoBold-W9lv.ttf" # Replace with the path to your TTF font file + # custom_font = ImageFont.truetyp e(font_path) # Adjust the font size as needed + # Set the font family to use the TrueType font + # font = ImageFont.truetype(font_path) + + wordcloud = WordCloud(font_path=font_path,width=800, height=600, background_color='white', max_words=15).generate(combined_texts) + st.write(f"### Word Cloud for {sentiment} Sentiment") + plt.figure(figsize=(10, 6)) + 
plt.imshow(wordcloud, interpolation='bilinear') + plt.axis('off') + st.pyplot() + + if sentiment == "negative": + # Extract the text from the word cloud object + generated_text = wordcloud.words_ + + # Print the generated text + for word, frequency in generated_text.items(): + # print(f"{word}: {frequency}") + text_for_llama += str(word)+" " + + + # Generate a word cloud from all the text data + all_text = ' '.join(Instructor_comment) + all_text = all_text.split() + filtered_words = [word for word in all_text if len(word) > 3] + all_text = ' '.join(filtered_words) + + st.write("### Word Cloud for All Sentiments") + wordcloud = WordCloud(font_path=font_path, width=800, height=800, background_color='white', max_words=200).generate(all_text) + plt.figure(figsize=(8, 8), facecolor=None) + plt.imshow(wordcloud) + plt.axis("off") + st.pyplot() + + neg_comments = [] + pos_comments = [] + # Print the comments and predicted sentiments + for comment, sentiment in zip(Instructor_comment, predicted_sentiments): + if sentiment == "positive": pos_comments.append(comment) + else: neg_comments.append(comment) + + + if text_for_llama == "": + st.title("Expressing Gratitude and Dedication") + text_for_llama = f""" + There's no negative feedback/comments to the instructor, give him/her short email to say. + [Your Name] = The Management + [Instructor's Name] = {Instructor} + """ + else: + st.title('Recommendation:') + text_for_llama = text_for_llama.split() + text_for_llama = ", ".join(text_for_llama) + text_for_llama = f""" + Based from these students' feedback: {str(text_for_llama)}. \n + Please generate a short email to teh instructor having 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only. + define the best email subject based from the recomendation + [Your Name] = The Management + [Instructor's Name] = {Instructor} + + """ + + # text_for_llama = f""" + # Based from these students' feedback: {str(text_for_llama)}. 
\n + # Please generate a short 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only. + + # """ + + # text_for_llama = f""" + # Based from these students' feedback: {str(text_for_llama)}. \n + # and Overall score per criteria results: {str(criteria_results)}. \n + # Please generate a short 10 recommendation in bullet format to the instructor. Make it in sentence type and in English language only. + # """ + # Then give insights about the evaluation report based from different criteria. + # Here is the results: {criteria_results} + # Your response format- + # Recommendation to Instructor: + # Insights on Evaluation Report: + + + + prompt = text_for_llama + + + + # # ================================================ replicate.com + # CUDA_LAUNCH_BLOCKING=1 + # import replicate + # replicate = replicate.Client(api_token='r8_M9Dx8VYKkuTcw1o39d4Yw0HtpWFt4k239ebvW') + # output = replicate.run( + # # "meta/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1", + # "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3", + # input={"prompt": prompt} + # ) + # st.write(output) + # # The meta/llama-2-70b-chat model can stream output as it's running. + # # The predict method returns an iterator, and you can iterate over that output. 
+ # # ================================================ + + + # # st.title('Recommendation:') + # # llama_output = "" + # # with st.spinner("Generating Recommendation"): + # # loading_text = st.empty() + # # for item in reponse(prompt): + # # llama_output +=item + # # loading_text.write(llama_output) + # # st.success("Generation Complete!") + + # # ================================================ local llama llm_chain + while True: + try: + with st.spinner("Generating...."): + # st.write(llm_chain.run(prompt)) + # st.write(g4f_prompt(prompt)) ################# + st.success("Generation Complete!") + break + + except Exception as e: + # Handle the error (e.g., log it or take appropriate action) + # Sleep for a moment before retrying + # st.write("Error occurred.. Retrying") + pass + # time.sleep(0.4) + # # ================================================ + + + + + + diff --git a/app5_selectbox/evaluation_fac.py b/app5_selectbox/evaluation_fac.py new file mode 100644 index 0000000000000000000000000000000000000000..d18c56cca80796ca5def11f775d702ff802caf52 --- /dev/null +++ b/app5_selectbox/evaluation_fac.py @@ -0,0 +1,528 @@ +import streamlit as st +import pandas as pd +import plotly.graph_objs as go +import time +import plotly.express as px +import ast +import numpy as np + +from app5_selectbox.database_con import cursor, db_connection +from app5_selectbox.app5_selectbox_func import generate_unique_4 +from app5_selectbox.evaluation_analysis import eval_analysis +# from app5_selectbox.evaluation_analysis_g4f import eval_analysis + +# from app5_selectbox.langchain_llama_gpu import llm_chain +from app5_selectbox.g4f_prompt import g4f_prompt + + +# st.title("Student-Faculty Evaluation") + + + + + +# st.write(st.session_state.student_id) +# Function to fetch evaluation data +def fetch_evaluation_data(): + cursor.execute("SELECT * FROM evaluation") + evaluation_data = cursor.fetchall() + if not evaluation_data: + st.warning("No evaluation data found.") + return None + 
column_names = [i[0] for i in cursor.description] + return pd.DataFrame(evaluation_data, columns=column_names) + +# Function to analyze instructors +def analyze_instructors(evaluation_df): + if evaluation_df is None: + return + + column_names = evaluation_df.columns[4:14] + criteria_labels = [column.replace("_", " ") for column in column_names] + + cursor.execute("SELECT * FROM instructor") + instructor_data = cursor.fetchall() + + instructor_df = pd.DataFrame(instructor_data, columns=["inst_id", "instructor name","program code", "user name", "password"]) + instructor_avg_scores = evaluation_df.groupby("inst_id")[column_names].mean().reset_index() + instructor_avg_scores = instructor_avg_scores.merge(instructor_df, on="inst_id", how="left") + + + # st.write(instructor_avg_scores) + # programs_list = sorted(instructor_avg_scores["program code"].unique()) + + # # Fetch program options from the program table + # cursor.execute("SELECT prog_id, prog_code, prog_name FROM program") + # selected_program = pd.DataFrame(cursor.fetchall(), columns=["prog_id", "prog_code", "prog_name"]) + # st.write(selected_program) + # # st.write(list({str(prog): prog[0] for prog in program_options})) + # selected_program_select = st.selectbox("Select Program", selected_program["prog_code"]) + # # selected_program = ast.literal_eval(str(selected_program)) + + # # selected_program = st.selectbox("Select Program", programs_list) + # filtered_instructor_list = pd.DataFrame(instructor_avg_scores) + # # st.write(filtered_instructor_list) + # mask = filtered_instructor_list["program code"] == selected_program.loc[selected_program['prog_code'] == selected_program_select, 'prog_id'].values[0] + # # st.write(mask) + # filtered_instructor_list = filtered_instructor_list.loc[mask] + + # # st.write(filtered_instructor_list) + # instructors_list = sorted(filtered_instructor_list["instructor name"].unique()) + # # print(type(instructor_avg_scores)) + + # instructors_list = 
instructor_avg_scores.query("program code == {selected_program}") + # st.write(len(instructors_list)) # df to graph + + # selected_instructor = st.selectbox("Select Instructor", instructors_list) + selected_instructor = st.session_state.inst_name + + try: + filtered_data = evaluation_df[evaluation_df["inst_id"] == instructor_avg_scores[instructor_avg_scores["instructor name"] == selected_instructor]["inst_id"].values[0]] + selected_instructor_comments = list(filtered_data["comments"]) + st.write(f"## Welcome! {selected_instructor}") + st.subheader(f"You are Evaluated by: {len(selected_instructor_comments)} students") + except: + st.info("### No Existing Evaluation Found!",icon="❗") + + + models = ['BERT-BASE MODEL', 'BERT-LARGE MODEL', 'DISTILIBERT MODEL', 'NAIVE BAYES MODEL'] + with st.sidebar.expander("Settings"): + # enable_analyze_graph = st.checkbox("Analyze graph by LLM", value=False) + global enable_llm_analyze_sintement, sentiment_model, sentiment_model_index + enable_llm_analyze_sintement = st.checkbox("Enable LLM (LLAMA)", value=False) + if enable_llm_analyze_sintement: + sentiment_model = st.selectbox("Select Model for Sentiment Analysis:", models) + sentiment_model_index = models.index(sentiment_model) + if st.button("Log Out", type="primary", use_container_width=True): + st.session_state.pop("logged_in", None) + st.session_state.pop("inst_id", None) + st.session_state.pop("inst_name", None) + st.session_state.pop("prog_id", None) + st.session_state.pop("user_type", None) + st.experimental_rerun() + st.button("Refresh", use_container_width=True) + + + cursor.execute(""" + SELECT subj_inst.subj_inst_id, subject.sub_name + FROM subj_inst + INNER JOIN subject + ON subj_inst.sub_id_code = subject.sub_id_code + """) + + # Assuming you have a DataFrame named 'filtered_data' + # and column_names is a list of column names you want to consider for calculating average scores + + # Convert all columns to numeric data + filtered_data[column_names] = 
filtered_data[column_names].apply(pd.to_numeric, errors='coerce') + + # Fetch subject data from the cursor + subject_data = cursor.fetchall() + + # Create a DataFrame for subject data + subject_df = pd.DataFrame(subject_data, columns=["subj_inst_id", "sub name"]) + + # Merge subject data with filtered data based on 'subj_inst_id' + filtered_data = filtered_data.merge(subject_df, on="subj_inst_id", how="left") + + # Group by subject name and calculate average scores + subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index() + + # Calculate total average and add it as a new column + subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1) + + subject_avg_scores = filtered_data.groupby("sub name")[column_names].mean().reset_index() + subject_avg_scores["total average"] = subject_avg_scores[column_names].mean(axis=1) + + + + + cursor.execute(f""" + SELECT + s.class_id, + pr.prog_code || '-' || c.class_year || '-' || c.class_section AS class_info, + COUNT(DISTINCT s.stud_id) AS num_respondents, + ROUND((AVG(Teaching_Effectiveness) + AVG(Course_Organization) + AVG(Accessibility_and_Communication) + + AVG(Assessment_and_Grading) + AVG(Respect_and_Inclusivity) + AVG(Engagement_and_Interactivity) + + AVG(Feedback_and_Improvement) + AVG(Accessibility_of_Learning_Resources) + + AVG(Passion_and_Enthusiasm) + AVG(Professionalism_and_Ethical_Conduct)) / 10, 2) AS avg_overall, + ROUND((COUNT(DISTINCT s.stud_id) * (AVG(Teaching_Effectiveness) + AVG(Course_Organization) + AVG(Accessibility_and_Communication) + + AVG(Assessment_and_Grading) + AVG(Respect_and_Inclusivity) + AVG(Engagement_and_Interactivity) + + AVG(Feedback_and_Improvement) + AVG(Accessibility_of_Learning_Resources) + + AVG(Passion_and_Enthusiasm) + AVG(Professionalism_and_Ethical_Conduct)) / 10), 2) AS weighted_avg_overall + FROM + evaluation e + JOIN + student s ON e.stud_id = s.stud_id + JOIN + class c ON s.class_id = c.class_id + JOIN + program pr ON 
c.prog_id = pr.prog_id + WHERE + s.stud_id IN {tuple(list(filtered_data["stud_id"]))} + GROUP BY + s.class_id, pr.prog_code, c.class_year, c.class_section, class_info; + """) + + avg_scores_per_class = pd.DataFrame(cursor.fetchall(), columns=[ + "class_id", + "class_info", + "num_respondents", + "avg_overall", + "weighted_avg_overall" + ]) + + # Calculate the last row's weighted_avg_overall / num_respondents + last_row_index = avg_scores_per_class["weighted_avg_overall"].last_valid_index() + if last_row_index is not None: + avg_scores_per_class.at[last_row_index, "weighted_avg_overall"] /= avg_scores_per_class.at[last_row_index, "num_respondents"] + + # Convert the column to decimal.Decimal before rounding + avg_scores_per_class["weighted_avg_overall"] = avg_scores_per_class["num_respondents"] * avg_scores_per_class["avg_overall"] # avg_scores_per_class["weighted_avg_overall"].apply(lambda x: round(float(x), 2)) + + # Drop rows with None values + avg_scores_per_class = avg_scores_per_class.dropna() + + + # Calculate the overall averages for avg_overall and weighted_avg_overall + num_respondents = round(avg_scores_per_class["num_respondents"].sum(), 2) + overall_avg_overall = round(avg_scores_per_class["avg_overall"].mean(), 2) + overall_weighted_avg_overall = round(avg_scores_per_class["weighted_avg_overall"].sum(),2) + weighted_avg_overall = round(overall_weighted_avg_overall / num_respondents,2) + + # # Append an additional row for avg_overall and weighted_avg_overall + # avg_scores_per_class = avg_scores_per_class.append({ + # "class_id": int(avg_scores_per_class["class_id"].max()) + 1, + # "class_info": "Total", + # "num_respondents": avg_scores_per_class["num_respondents"].sum(), + # "avg_overall": round(overall_avg_overall, 2), + # "weighted_avg_overall": round(overall_weighted_avg_overall / avg_scores_per_class["num_respondents"].sum(), 2) + # }, ignore_index=True) + + # st.write(avg_scores_per_class.style.set_properties(**{'text-align': 'center'})) + + + + 
# Add summary rows to the DataFrame + avg_scores_per_class = avg_scores_per_class.append({ + "class_id": "", + "class_info": "Summary", + "num_respondents": num_respondents, + "avg_overall": " ", + "weighted_avg_overall": overall_weighted_avg_overall + }, ignore_index=True) + + + def get_color(weighted_avg_overall): + satisfaction_level = calculate_satisfaction(weighted_avg_overall) + if satisfaction_level == "Outstanding": + return "rgb(171, 235, 198 )" + elif satisfaction_level == "Above Average": + return "rgb(218, 247, 166)" + elif satisfaction_level == "Average": + return "rgb(255, 195, 0)" + elif satisfaction_level == "Below Average": + return "rgb(255, 87, 51)" + else: + return "rgb(255, 87, 51)" + + def calculate_satisfaction(weighted_avg_overall): + if weighted_avg_overall > 4: + return "Outstanding" + elif weighted_avg_overall > 3: + return "Above Average" + elif weighted_avg_overall > 2: + return "Average" + elif weighted_avg_overall > 1: + return "Below Average" + else: + return "Unsatisfactory" + + def highlight_cell(col, col_label, row_label): + # check if col is a column we want to highlight + if col.name == col_label: + # a boolean mask where True represents a row we want to highlight + mask = (col.index == row_label) + # return an array of string styles (e.g. ["", "background-color: yellow"]) + # return ["background-color: lightgreen" if val_bool else "" for val_bool in mask] + return [f"background-color: {get_color(weighted_avg_overall)}" if val_bool else "" for val_bool in mask] + else: + # return an array of empty strings that has the same size as col (e.g. 
["",""]) + return np.full_like(col, "", dtype="str") + + + + + + avg_scores_per_class = avg_scores_per_class.append({ + "class_id": "", + "class_info": "Weighted Avg.", + "num_respondents": " ", # You can set this to "N/A" or any appropriate value + "avg_overall": calculate_satisfaction(weighted_avg_overall), # You can set this to "N/A" or any appropriate value + "weighted_avg_overall": weighted_avg_overall + }, ignore_index=True) + + + # # st.dataframe(avg_scores_per_class.style.background_gradient(subset=["C"], cmap="RdYlGn", vmin=0, vmax=2.5)) + + last_row = avg_scores_per_class.index[-1] + # avg_scores_per_class =avg_scores_per_class.style.apply(highlight_cell, col_label="avg_overall", row_label=last_row) + # Assuming avg_scores_per_class is your DataFrame + + + # Rename columns + avg_scores_per_class.rename(columns={'class_id': 'CLASS ID', + 'class_info': 'SECTION', + 'num_respondents': 'NO. of RESPONDENTS', + 'avg_overall': 'AVERAGE', + 'weighted_avg_overall': 'WEIGHTED AVERAGE'}, inplace=True) + + # Format numeric values to two decimal places + avg_scores_per_class = avg_scores_per_class.applymap(lambda x: '{:.2f}'.format(x) if isinstance(x, float) else x) + + # Get the last row index + last_row = avg_scores_per_class.index[-1] + + # Apply any specific styling + avg_scores_per_class = avg_scores_per_class.style.apply(highlight_cell, col_label="AVERAGE", row_label=last_row) + + # Drop index column + avg_scores_per_class.hide_index() + + # Render DataFrame without index column + # st.dataframe(avg_scores_per_class_no_index) + + # avg_scores_per_class.style.apply(lambda x: ["background: red" if v > x.iloc[3] else "" for v in x], axis = 1) + + # avg_scores_per_class = pd.DataFrame(avg_scores_per_class) + # avg_scores_per_class.set_index('CLASS ID', inplace=True) + # avg_scores_per_class.reset_index(drop=True, inplace=True) + # st.write(type(avg_scores_per_class)) + # avg_scores_per_class.reset_index(drop=True, inplace=True) + # 
import g4f


def g4f_prompt(prompt):
    """Send *prompt* to gpt-3.5-turbo through the g4f ``You`` provider and
    return the complete reply text.

    The response is echoed to stdout chunk-by-chunk as it arrives.
    """
    completion = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        provider=g4f.Provider.You,  # provider noted as the most reliable
        messages=[{"role": "user", "content": prompt}],
    )

    pieces = []
    for chunk in completion:
        print(chunk, flush=True, end='')
        pieces.append(chunk)
    return "".join(pieces)
def get_instructors_with_programs():
    """Return every instructor joined with their assigned program as a
    DataFrame with display-ready column names."""
    cursor.execute("""
        SELECT instructor.inst_id, instructor.inst_name, program.prog_id, program.prog_code
        FROM instructor
        INNER JOIN program ON instructor.prog_id = program.prog_id
    """)
    rows = cursor.fetchall()
    return pd.DataFrame(
        rows,
        columns=['Instructor ID', 'Instructor Name', 'Program ID', 'Program code'],
    )


def instructor(table_name):
    """Render the 'add instructor' form and the table of existing assignments."""
    inst_id = generate_unique_4(cursor, "inst_id", table_name)
    inst_name = st.text_input("Instructor Name", key="inst_name").upper()

    # Offer every program as "<id> - <name>" and map the label back to its id.
    cursor.execute("SELECT prog_id, prog_name FROM program")
    program_rows = cursor.fetchall()
    label_to_prog_id = {f"{pid} - {pname}": pid for pid, pname in program_rows}
    prog_id = st.selectbox("Program", list(label_to_prog_id), key="prog_id")

    if st.button("Insert Instructor Record"):
        if not inst_name or not prog_id:
            st.error("Instructor Name and Program are required. Please provide values for both fields.")
        else:
            selected_prog_id = label_to_prog_id[prog_id]

            # Refuse exact duplicates (same name assigned to the same program).
            cursor.execute(
                "SELECT inst_id FROM instructor WHERE inst_name = %s AND prog_id = %s",
                (inst_name, selected_prog_id),
            )
            if cursor.fetchone():
                st.error("Instructor with the same name and program already exists.")
            else:
                try:
                    cursor.execute(
                        "INSERT INTO instructor (inst_id, inst_name, prog_id) VALUES (%s, %s, %s)",
                        (inst_id, inst_name, selected_prog_id),
                    )
                    db_connection.commit()
                    st.success("Record inserted successfully.")
                except Exception as e:
                    st.error(f"An error occurred: {str(e)}")

    # Always show the current assignments below the form.
    st.title("Instructors with Assigned Program Codes")
    instructor_df = get_instructors_with_programs()
    st.dataframe(instructor_df.style.set_properties(**{'text-align': 'center'}))
Create a function to load Llama-2 + + +# def create_prompt() -> prompts.PromptTemplate: +# """ +# Generates prompt template + +# :param: Takes in no parameters +# :return: a prompt template +# """ +# # Prompt obtained from langchain docs +# _DEFAULT_TEMPLATE: str = """Assistant is a large language model trained by Meta. + +# Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. + +# Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics. + +# Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist. + +# Human: {human_input} +# Assistant:""" + +# prompt: prompts.PromptTemplate = prompts.PromptTemplate( +# input_variables=["human_input"], template=_DEFAULT_TEMPLATE) + +# return prompt + + +# def load_model() -> LlamaCpp: +# # Callbacks support token-wise streaming +# # Verbose is required to pass to the callback manager +# callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]) + +# # n_gpu_layers - determines how many layers of the model are offloaded to your GPU. 
+# # n_batch - how many tokens are processed in parallel. + +# # Change this value based on your model and your GPU VRAM pool. +# n_gpu_layers = 40 +# # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU. +# n_batch = 512 + +# Llama_llm = LlamaCpp( +# model_path=MODEL_PATH, +# temperature=0.75, +# max_tokens=2000, +# n_gpu_layers=n_gpu_layers, +# n_batch=n_batch, +# top_p=1, +# callback_manager=callback_manager, +# verbose=True, +# ) + +# return Llama_llm + + +# def reponse(promt): + +# llm = load_model() +# model_prompt = promt +# # model_prompt: str = """ +# # Based from these students' feedback: I don't clearly understand the lesson, topic was not allinged with the course, Sir is always late, no proper use of learning materials. \n +# # Please generate a very short recommendation to the instructor. Make it in sentence type and in English language only. +# # """ + +# response = llm(prompt=model_prompt) + +# # print(response) +# return response + + + + + + + + + + + + + + + + + +# from langchain.llms import LlamaCpp +# from langchain import prompts, LLMChain +# from langchain.callbacks.manager import CallbackManager +# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler + +# MODEL_PATH = "D:/FDM/llama-2-7b-chat.Q2_K.gguf" +# Llama_llm = None # Global variable to hold the LlamaCpp instance +# llm_chain = None # Global variable to hold the LLMChain instance + +# def create_prompt(human_input: str) -> prompts.PromptTemplate: +# """ +# Generates a custom prompt template. + +# :param human_input: The user's input that will be included in the prompt. +# :return: A prompt template with the specified user input. +# """ +# template = f"""Assistant is a large language model trained by Meta. + +# Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. 
from langchain.llms.llamacpp import LlamaCpp
from langchain.chains import LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.prompts import PromptTemplate

# Path to the local quantised Llama-2 weights.
MODEL_PATH = "D:/FDM/llama-2-7b-chat.Q2_K.gguf"
Llama_llm = None   # lazily-created LlamaCpp singleton
llm_chain = None   # lazily-created LLMChain singleton
n_gpu_layers = 40  # layers offloaded to the GPU
n_batch = 512      # tokens processed in parallel


def create_prompt_template(human_input: str) -> PromptTemplate:
    """Build the pass-through prompt template used by the chain.

    NOTE(review): *human_input* is accepted for interface compatibility but is
    not embedded here — the template is literally "{human_input}", which is
    filled in when the chain runs.
    """
    return PromptTemplate(
        input_variables=["human_input"],
        template="{human_input}",
    )


def load_model() -> LLMChain:
    """Create (once) and return the shared LLMChain backed by LlamaCpp."""
    global Llama_llm, llm_chain

    if Llama_llm is None:
        print("Loading LlamaCpp instance...")
        stream_callbacks = CallbackManager([StreamingStdOutCallbackHandler()])
        Llama_llm = LlamaCpp(
            model_path=MODEL_PATH,
            temperature=0.75,
            max_tokens=2000,
            n_gpu_layers=n_gpu_layers,
            n_batch=n_batch,
            top_p=1,
            callback_manager=stream_callbacks,
            verbose=True,
        )

    if llm_chain is None:
        print("Creating LLMChain instance...")
        llm_chain = LLMChain(llm=Llama_llm, prompt=create_prompt_template(""))

    return llm_chain


# Eagerly build the chain at import time so later calls reuse the instance.
print("Attempting to load the model...")
llm_chain = load_model()


def langchain_input(prompt):
    """Run *prompt* through the shared chain and return the model's reply."""
    print("langchain_input............")
    return llm_chain.run(prompt)
def preprocess_text(text):
    """Normalise raw comment text for the classifier.

    Coerces *text* to str, lower-cases it (case folding), and removes every
    character that is not an ASCII letter or whitespace.
    """
    lowered = str(text).lower()
    return re.sub(r'[^a-zA-Z\s]', '', lowered)
# # Apply text preprocessing +# # df['cleaned_text'] = df['text'].apply(preprocess_text) + +# # # Encode labels +# # le = LabelEncoder() +# # df['label'] = le.fit_transform(df['label']) + +# # # Split the dataset into training and testing sets +# # train_df, test_df = train_test_split(df, test_size=0.2) + +# # # Create Bag-of-Words representation using CountVectorizer +# # vectorizer = CountVectorizer(max_features=5000) +# # X_train = vectorizer.fit_transform(train_df['cleaned_text']) +# # # X_test = vectorizer.transform(test_df['cleaned_text']) +# # y_train = train_df['label'] +# # # y_test = test_df['label'] + +# # # Initialize Naive Bayes classifier +# # nb_classifier = MultinomialNB() + +# # # Train Naive Bayes classifier +# # nb_classifier.fit(X_train, y_train) + +# # # # Save the trained model +# # # joblib.dump(nb_classifier, "/home/aibo/prototype_v1/NAIVE_BAYES/naive_bayes_sentiment_model.pkl") + +# # # Save the trained model to a pickle file +# # with open("/home/aibo/prototype_v1/NAIVE_BAYES/naive_bayes_sentiment_model.pkl", 'wb') as model_file: +# # pickle.dump(nb_classifier, model_file) + +# # # Save the CountVectorizer to a pickle file +# # with open('/home/aibo/prototype_v1/NAIVE_BAYES/vectorizer.pkl', 'wb') as vectorizer_file: +# # pickle.dump(vectorizer, vectorizer_file) + +# # # Load the trained Naive Bayes model +# # loaded_nb_model = joblib.load("/home/aibo/prototype_v1/NAIVE_BAYES/naive_bayes_sentiment_model.pkl") +# # Load the model from the pickle file + + +# ####################################### +# # Load LabelEncoder +# with gzip.open(r'/home/aibo/prototype_v1/NAIVE_BAYES/label_encoder.pkl', 'rb') as label_encoder_file: +# le = pickle.load(label_encoder_file) + +# # Load the trained Naive Bayes classifier +# with gzip.open(r'/home/aibo/prototype_v1/NAIVE_BAYES/naive_bayes_sentiment_model.pkl', 'rb') as model_file: +# loaded_nb_model = pickle.load(model_file) + +# # Load the CountVectorizer +# with 
gzip.open(r'/home/aibo/prototype_v1/NAIVE_BAYES/vectorizer.pkl', 'rb') as vectorizer_file: +# loaded_vectorizer = pickle.load(vectorizer_file) + +# # Apply text preprocessing to the sample comments +# sample_comments_preprocessed = [preprocess_text(comment) for comment in sample_comments] + +# # Transform the sample comments using the same CountVectorizer +# sample_comments_transformed = loaded_vectorizer.transform(sample_comments_preprocessed) + +# # Use the loaded model for inference +# predicted_labels = loaded_nb_model.predict(sample_comments_transformed) + +# predicted_sentiment_label = le.inverse_transform(predicted_labels) + +# # # Decode the predicted labels using the loaded label encoder +# # le = LabelEncoder() +# # decoded_labels = le.inverse_transform(predicted_labels) +# # Mapping function +# # Map predictions to "negative" (0) and "positive" (1) +# # predicted_labels = ["negative" if pred == 0 else "positive" for pred in predicted_labels] + +# return sample_comments_preprocessed, predicted_sentiment_label + + +# # sample_comments = [ +# # "The disinterested teaching style of the instructor made it hard to fully comprehend and engage with the material", +# # "Hindi ko matukoy kung paano mapapakinabangan ang mga kasanayang ito sa totoong buhay", +# # "The course lacks real-world applications of machine learning that would enhance practical understanding.", +# # "your positivity is like a ray of sunshine on a cloudy day.", +# # "I'm grateful for the positive impact you've had on my education", +# # "The instructors' enthusiasm creates a positive learning environment where everyone feels encouraged to participate and ask questions", +# # "Hindi ako nakatutok sa lecture na ito", +# # "You show the true value of education.", +# # "Ipinapakita mo ang halaga ng pagiging positibo at pagiging bukas sa pagbabago sa aming mga buhay", +# # "You give meaning to our dreams.", +# # "Your class has ignited a passion for the subject in me", +# # "I didn't find the 
def read_bytes_from_url(url):
    """Fetch a gzip-compressed pickle from *url* and return the unpickled object.

    Returns None (after logging to stdout) when the HTTP status is not 200.

    SECURITY NOTE(review): this unpickles data fetched over the network; pickle
    can execute arbitrary code, so the URL must be fully trusted.
    """
    response = requests.get(url)
    if response.status_code != 200:
        print(f"Failed to fetch URL: {url}. Status code: {response.status_code}")
        return None

    with gzip.open(io.BytesIO(response.content), 'rb') as compressed:
        return pickle.load(compressed)
@st.cache_data(show_spinner=False)
def nb_clf(sample_comments):
    """Classify *sample_comments* with the Naive Bayes model hosted on the
    Hugging Face hub.

    Returns a 3-tuple:
      * the original comments (unchanged),
      * the decoded sentiment label for each comment,
      * a list of ``{'positive': pct, 'negative': pct}`` probability dicts.

    BUG FIX: the original code had a bare ``st.cache()`` *call* above the
    function instead of a decorator, so nothing was ever cached and the three
    pickled artefacts were re-downloaded on every invocation.  ``st.cache_data``
    (already used elsewhere in this project) now memoises the whole call.
    """
    # Label encoder, classifier and CountVectorizer, all gzip-pickled on the hub.
    le = read_bytes_from_url('https://huggingface.co/MENG21/studfacultyeval-NAIVEBAYES/resolve/main/label_encoder.pkl')
    loaded_nb_model = read_bytes_from_url('https://huggingface.co/MENG21/studfacultyeval-NAIVEBAYES/resolve/main/model.pkl')
    loaded_vectorizer = read_bytes_from_url('https://huggingface.co/MENG21/studfacultyeval-NAIVEBAYES/resolve/main/vectorizer.pkl')

    # Same preprocessing as training time: lower-case, letters/whitespace only.
    sample_comments_preprocessed = [preprocess_text(comment) for comment in sample_comments]
    sample_comments_transformed = loaded_vectorizer.transform(sample_comments_preprocessed)

    predicted_proba = loaded_nb_model.predict_proba(sample_comments_transformed)
    predicted_labels = loaded_nb_model.predict(sample_comments_transformed)
    predicted_sentiment_label = le.inverse_transform(predicted_labels)

    # Column 1 is the "positive" class, column 0 "negative"; report percentages.
    predicted_proba_dict = [
        {'positive': prob_array[1] * 100, 'negative': prob_array[0] * 100}
        for prob_array in predicted_proba
    ]

    return sample_comments, predicted_sentiment_label, predicted_proba_dict
def program(table_name):
    """Render the 'add program' form, insert a validated record, and show the table."""
    prog_id = generate_unique_4(cursor, "prog_id", table_name)
    prog_code = st.text_input("Program Code", key="prog_code").upper()
    prog_name = st.text_input("Program Name", key="prog_name").upper()

    if st.button("Insert Program Record"):
        if not prog_code or not prog_name:
            st.error("Program Code and Program Name are required. Please provide values for both fields.")
        else:
            try:
                # Reject duplicate program codes before inserting.
                cursor.execute("SELECT prog_id FROM program WHERE prog_code = %s", (prog_code,))
                if cursor.fetchone() is not None:
                    st.error("A program with the same program code already exists.")
                else:
                    cursor.execute(
                        "INSERT INTO program (prog_id, prog_code, prog_name) VALUES (%s, %s, %s)",
                        (prog_id, prog_code, prog_name),
                    )
                    db_connection.commit()
                    st.success("Record inserted successfully.")
            except Exception as e:
                st.error(f"An error occurred: {str(e)}")

    # NOTE(review): placement reconstructed — original indentation was ambiguous;
    # rendering the table on every rerun is the usual Streamlit pattern here.
    display_table(cursor, table_name)
def student(table_name):
    """Render the 'add student' form, insert validated records, and show the
    per-program analysis plus the student table.

    BUG FIXES: the original built SQL with f-strings and *unquoted* values —
    ``WHERE stud_username = {stud_username}`` and the matching INSERT were both
    a guaranteed syntax error for any textual input and an SQL-injection
    vector.  All value interpolation now uses parameterized ``%s`` queries.
    """
    stud_id = generate_unique_4(cursor, "stud_id", table_name)
    stud_name = st.text_input("Student Name", key="stud_name").upper()
    stud_username = st.text_input("Student Username", key="stud_username")
    stud_password = st.text_input("Student Password", type="password", key="stud_password")

    # Populate the program selector from the program table.
    cursor.execute("SELECT program.prog_code FROM program")
    program_options = [code[0] for code in cursor.fetchall()]
    selected_program = st.selectbox("Select Program Code", program_options, key="selected_program")

    # Classes belonging to the chosen program (parameterized, was an f-string).
    cursor.execute(
        """SELECT class.class_id, class.class_year, class.class_section, program.prog_code
           FROM class
           JOIN program ON class.prog_id = program.prog_id
           WHERE program.prog_code = %s""",
        (selected_program,),
    )
    class_data = cursor.fetchall()
    class_options = [f"{year}{section} - ID:{class_id}" for class_id, year, section, prog_code in class_data]
    selected_class = st.selectbox("Select Class", sorted(class_options), key="selected_class")

    if st.button("Insert Student Record"):
        if not stud_name or not stud_username or not stud_password or not selected_class:
            st.error("All fields are required. Please provide values for all fields.")
        else:
            try:
                # The class id is embedded in the option label as "ID:<digits>".
                selected_class_id = re.search(r'ID:(\d+)', selected_class).group(1)

                # Parameterized username lookup (was unquoted f-string SQL).
                cursor.execute(
                    "SELECT stud_username FROM student WHERE stud_username = %s",
                    (stud_username,),
                )
                if cursor.fetchone():
                    st.error("Username already taken. Please choose a different username.")
                else:
                    cursor.execute(
                        "INSERT INTO student (stud_id, stud_name, stud_username, stud_password, class_id) "
                        "VALUES (%s, %s, %s, %s, %s)",
                        (stud_id, stud_name, stud_username, stud_password, selected_class_id),
                    )
                    db_connection.commit()
                    st.success("Record inserted successfully.")
            except Exception as e:
                st.error(f"An error occurred: {str(e)}")

    # Per-section head-count analysis for the selected program.
    analyze_student_data(cursor)

    def display_student_table(cursor, table_name):
        # Show the student table with a concatenated program/year/section column.
        # table_name is interpolated as an identifier and must come from trusted
        # code, never from user input.
        # NOTE(review): '||' is string concatenation only in ANSI SQL / SQLite;
        # under MySQL's default sql_mode it is logical OR — confirm the backend.
        selected_program = st.selectbox("Select Program Code", program_options, key="display_student_table")
        cursor.execute(f"SELECT s.*, c.class_year, c.class_section, p.prog_code, "
                       "p.prog_code || c.class_year || c.class_section AS custom_column "
                       f"FROM {table_name} s "
                       "JOIN class c ON s.class_id = c.class_id "
                       "JOIN program p ON c.prog_id = p.prog_id")
        data = cursor.fetchall()
        df = pd.DataFrame(data, columns=['stud_id', 'stud_name', 'stud_username', 'stud_password', 'class_id',
                                         'is_eval', 'user_type', 'class_year', 'class_section', 'prog_code',
                                         'custom_column'])
        df = df[df['prog_code'] == selected_program]
        st.dataframe(df.style.set_properties(**{'text-align': 'center'}))

    display_student_table(cursor, table_name)
def display_combined_table(cursor, table_name):
    """Fetch subject/instructor/class assignments with human-readable names and
    render them behind cascading program / year / section filters.

    Fix: removed a leftover debug ``print()`` of the pandas Styler that wrote
    to stdout immediately before the same frame was rendered via
    ``st.dataframe``.
    """
    try:
        cursor.execute("""
            SELECT
                si.subj_inst_id,
                si.sub_id_code,
                s.sub_name,
                si.inst_id,
                i.inst_name,
                si.class_id,
                p.prog_code,
                c.class_year,
                c.class_section
            FROM subj_inst si
            INNER JOIN subject s ON si.sub_id_code = s.sub_id_code
            INNER JOIN instructor i ON si.inst_id = i.inst_id
            INNER JOIN class c ON si.class_id = c.class_id
            INNER JOIN program p ON s.prog_id = p.prog_id
        """)
        data = cursor.fetchall()

        if not data:
            st.warning("No data found.")
            return

        df = pd.DataFrame(data, columns=[
            'subj_inst_id',
            'sub_id_code',
            'subj_name',
            'inst_id',
            'inst_name',
            'class_id',
            'prog_code',
            'class_year',
            'class_section',
        ])

        st.header(f"{table_name} Table")

        # Cascading filters: program -> year -> section.
        prog_code_filter = st.selectbox("Filter by Program Code", df['prog_code'].unique())
        class_year_filter = st.selectbox("Filter by Class Year", df['class_year'].unique())

        available_sections = df[
            (df['prog_code'] == prog_code_filter) & (df['class_year'] == class_year_filter)
        ]['class_section'].unique()

        if len(available_sections) == 0:
            st.warning("No sections available for the selected program code and class year.")
            return

        section_filter = st.selectbox("Filter by Section", sorted(available_sections))

        filtered_df = df[
            (df['prog_code'] == prog_code_filter)
            & (df['class_year'] == class_year_filter)
            & (df['class_section'] == section_filter)
        ]

        if filtered_df.empty:
            st.warning("No matching records found.")
        else:
            st.dataframe(filtered_df.style.set_properties(**{'text-align': 'center'}))
    except Exception as e:
        st.error(f"An error occurred while fetching data: {str(e)}")
+ """, (class_options[[cl[1] for cl in class_options].index(class_id)][0],)) + assigned_subjects = set(row[0] for row in cursor.fetchall()) + + cursor.execute(""" + SELECT s.sub_id_code, s.sub_name + FROM subject s + WHERE s.prog_id = ? + """, (program_options[[prog[1] for prog in program_options].index(prog_code)][0],)) + subject_options = cursor.fetchall() + subject_options = [(sub[0], sub[1]) for sub in subject_options if sub[0] not in assigned_subjects] + + if not subject_options: + st.warning("All subjects in this class already have instructors.") + else: + sub_id_code = st.selectbox("Subject Name", [str(sub[1]) for sub in subject_options]) + + # Fetch selected subject's instructor options + cursor.execute("SELECT inst_id, inst_name FROM instructor") + instructor_options = cursor.fetchall() + inst_id = st.selectbox("Instructor Name", [str(inst[1]) for inst in instructor_options]) + + if st.button("Insert Subject Instructor Record"): + try: + # Extract the selected IDs from the displayed names + sub_id_code = subject_options[[sub[1] for sub in subject_options].index(sub_id_code)][0] + inst_id = instructor_options[[inst[1] for inst in instructor_options].index(inst_id)][0] + class_id = class_options[[cl[1] for cl in class_options].index(class_id)][0] + + # Check for duplicates + cursor.execute(""" + SELECT subj_inst_id + FROM subj_inst + WHERE sub_id_code = ? + AND inst_id = ? + AND class_id = ? + """, (sub_id_code, inst_id, class_id)) + duplicate_check = cursor.fetchone() + + if duplicate_check: + st.error("This combination of Subject, Instructor, and Class already exists.") + else: + # Insert a record into the subj_inst table + cursor.execute(""" + INSERT INTO subj_inst (subj_inst_id, sub_id_code, inst_id, class_id) + VALUES (?, ?, ?, ?) 
+ """, (subj_inst_id, sub_id_code, inst_id, class_id)) + db_connection.commit() + st.success("Record inserted successfully.") + time.sleep(0.3) + st.experimental_rerun() + except Exception as e: + st.error(f"An error occurred: {str(e)}") + + display_combined_table(cursor, table_name) + +# # db_connection.reconnect() diff --git a/app5_selectbox/subject.py b/app5_selectbox/subject.py new file mode 100644 index 0000000000000000000000000000000000000000..214dbcd623c35f30eaf707d7f487680cf05926cc --- /dev/null +++ b/app5_selectbox/subject.py @@ -0,0 +1,57 @@ +# subject.py +import streamlit as st +import pandas as pd +from app5_selectbox.database_con import cursor, db_connection +from app5_selectbox.app5_selectbox_func import display_table, generate_unique_4 + +# Existing code for adding subjects +def subject(table_name): + sub_id_code = generate_unique_4(cursor, "sub_id_code", table_name) + sub_name = st.text_input("Subject Name", key="sub_name").upper() + + # Fetch program options from the database + cursor.execute("SELECT prog_id, prog_name FROM program") + program_options = cursor.fetchall() + prog_id_options = {str(prog[0]): prog[1] for prog in program_options} + + # Fetch academic year options from the database, including sem_num + cursor.execute("SELECT acad_id, ' - Semester ' || sem_num FROM academic_list") + academic_options = cursor.fetchall() + acad_id_options = {str(acad[0]): acad[1] for acad in academic_options} + + prog_id = st.selectbox("Program", list(prog_id_options.keys()), format_func=lambda x: prog_id_options[x]) + acad_id = st.selectbox("Academic Year and Semester", list(acad_id_options.keys()), format_func=lambda x: acad_id_options[x]) + + if st.button("Insert Subject Record"): + # Check if sub_name, prog_id, and acad_id are provided + if not sub_name or not prog_id or not acad_id: + st.error("Subject Name, Program, and Academic Year and Semester are required. 
Please provide values for all fields.") + else: + try: + # Check for duplicates + cursor.execute("SELECT sub_id_code FROM subject WHERE sub_name = ? AND prog_id = ? AND acad_id = ?", + (sub_name, prog_id, acad_id)) + duplicate_check = cursor.fetchone() + if duplicate_check: + st.error("A subject with the same name, program, and academic year and semester already exists.") + else: + # Insert a record into the subject table + cursor.execute("INSERT INTO subject (sub_id_code, sub_name, prog_id, acad_id) VALUES (?, ?, ?, ?)", + (sub_id_code, sub_name, prog_id, acad_id)) + db_connection.commit() + st.success("Record inserted successfully.") + except Exception as e: + st.error(f"An error occurred: {str(e)}") + + # New code for displaying the DataFrame + st.header("Filtered Subjects") + if prog_id and acad_id: + cursor.execute("SELECT sub_id_code, sub_name FROM subject WHERE prog_id = ? AND acad_id = ?", + (prog_id, acad_id)) + subject_data = cursor.fetchall() + if subject_data: + df = pd.DataFrame(subject_data, columns=["sub_id_code", "sub_name"]) + st.dataframe(df.style.set_properties(**{'text-align': 'center'})) + else: + st.warning("No subjects found for the selected Program and Academic Year/Semester.") + display_table(cursor, table_name) diff --git a/database/data.sqlite b/database/data.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..e30c9428fa7c2eafa74b52f458e2980b157367e5 Binary files /dev/null and b/database/data.sqlite differ diff --git a/database/data111.sqlite b/database/data111.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..ea65a495428b2fcbdb58ee0d320d779f88dbd90a Binary files /dev/null and b/database/data111.sqlite differ diff --git a/database/data_2_4_24.sqlite b/database/data_2_4_24.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..2e58c629368c025d763e62a57fe4aa43fb2453f6 Binary files /dev/null and b/database/data_2_4_24.sqlite differ diff --git a/database/data_old.sqlite 
# main.py — Streamlit entry point for the Faculty Evaluation app.
import streamlit as st

# Page configuration must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Faculty Evaluation",
    layout="centered"
)

# Importing the table modules registers them, and importing database_con
# opens the shared connection/cursor used throughout the app.
from app5_selectbox import academic_list, class_tbl, instructor, program, student, subject, subj_inst, evaluation
import app5
from app5_selectbox.database_con import cursor, db_connection

# The whole UI (including the per-table navigation) is delegated to app5;
# an older sidebar dispatcher that called each table module directly was
# retired and lives in version-control history.
app5.app5()