"""Streamlit "What-if Classroom" dashboard for the DSP MOOC (dsp-002).

Lets an instructor add a hypothetical piece of course content (quiz/video),
simulates student trajectories with the IRL world model, and compares
predicted pass/fail performance and per-topic engagement before vs. after
the intervention.
"""
import os
import sys

# Make the project root importable when launched via `streamlit run`.
sys.path.append(os.path.abspath('/Users/huonglan/Documents/codeproject/IRL-MOOC'))

import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
import pandas as pd
import tensorflow as tf
import pickle
import models.maxcausal as maxcausal
from utils.data_helper import *
from environment import raw_world as Env
from utils import irl_helper
from utils.trajectory_comparison import analyze_chapter_engagement
from typing import List, Dict, Union, Tuple
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# Must run before any other Streamlit call in the app.
st.set_page_config(layout="wide")

from utils.evaluation import *
from utils.rnn_models import *
from PIL import Image


def performance_prediction(model, x_test):
    """Predict pass/fail per student and count each outcome.

    A model output >= 0.5 on the first unit is interpreted as class 1,
    which this app treats as "fail" (see the fail/pass counts below).

    Parameters
    ----------
    model : keras model with a `.predict` method.
    x_test : np.ndarray of student feature sequences.

    Returns
    -------
    tuple
        (num_fail, num_pass, y_pred) where y_pred is a binary np.ndarray.
    """
    y_pred = model.predict(x_test)
    y_pred = np.array([1 if y[0] >= 0.5 else 0 for y in y_pred])
    num_fail = sum(y_pred)
    num_pass = len(y_pred) - num_fail
    return num_fail, num_pass, y_pred


def plot_performance(df=None, show_image=False, show_performance=False,
                     col_image=None, col_performance=None):
    """Render the static what-if results image and/or a stacked pass/fail bar chart.

    Parameters
    ----------
    df : pd.DataFrame, optional
        One row per scenario with columns 'class', 'fail_num', 'pass_num',
        'fail_percent', 'pass_percent'.
    show_image, show_performance : bool
        Panel toggles.
    col_image, col_performance : streamlit column containers to draw into.
    """
    if show_image and col_image is not None:
        with col_image:
            fig = Image.open('streamlit-assets/whatif_results_week_6.png')
            st.image(fig,
                     caption="Predicted effectiveness of interventions in week 6. "
                             "For example, adding content from topic 7 might improve students' performance, "
                             "while adding content from week 2 might harm",
                     use_container_width=True)
    if show_performance and df is not None and col_performance is not None:
        with col_performance:
            fig = go.Figure()
            bar_height_px = 250
            # Scale total figure height with the number of scenario rows.
            total_height = bar_height_px * len(df)
            fig.add_trace(go.Bar(
                y=df['class'],
                x=df['fail_percent'],
                name='Fail',
                orientation='h',
                text=[f'{x:.1f}%' for x in df['fail_percent']],
                textfont=dict(size=25),
                textposition='auto',
                customdata=df[['fail_num']],
                hovertemplate='%{y}<br>No. Students: %{customdata[0]}',
                marker=dict(color='#c0392b')
            ))
            fig.add_trace(go.Bar(
                y=df['class'],
                x=df['pass_percent'],
                name='Pass',
                orientation='h',
                text=[f'{x:.1f}%' for x in df['pass_percent']],
                textfont=dict(size=25),
                textposition='auto',
                customdata=df[['pass_num']],
                hovertemplate='%{y}<br>No. Students: %{customdata[0]}',
                marker=dict(color='#27ae60')
            ))
            fig.update_layout(
                barmode='stack',
                xaxis=dict(title='Percentage', range=[0, 100]),
                legend=dict(orientation='h', yanchor='bottom', y=1.02,
                            xanchor='right', x=1),
                height=total_height,
                margin=dict(l=200, r=40, t=120, b=60),
            )
            st.plotly_chart(fig, use_container_width=True)


def compare_chapter_engagement(syn_trajectories: Union[List, np.ndarray, pd.Series],
                               real_trajectories: Union[List, np.ndarray, pd.Series],
                               world, save_dir=None) -> Tuple[Dict, Dict]:
    """
    Compare engagement metrics between real and synthetic students per chapter.

    Parameters
    ----------
    syn_trajectories : Union[List, np.ndarray, pd.Series]
        Synthetic student trajectories
    real_trajectories : Union[List, np.ndarray, pd.Series]
        Real student trajectories
    world : ClickstreamWorld
        World object containing state and action information
    save_dir : str, optional
        Directory to save visualizations

    Returns
    -------
    Tuple[Dict, Dict]: Real and synthetic engagement metrics
    """
    st.header("Engagement Analysis: Before vs. After Intervention")
    real_metrics = analyze_chapter_engagement(real_trajectories, world, "Real")
    syn_metrics = analyze_chapter_engagement(syn_trajectories, world, "Synthetic")

    # (metric key in the per-chapter dicts, human-readable label)
    metrics_to_plot = [
        ('visit_count', 'Total Visits'),
        ('completion_rate', 'Completion Rate (%)'),
        ('problem_attempts', 'Quiz Attempts'),
        ('video_views', 'Video Views')
    ]
    increase_color = '#27ae60'  # darker green
    decrease_color = '#c0392b'
    neutral_color = '#bdc3c7'

    plotly_figures = []
    for idx, (metric, ylabel) in enumerate(metrics_to_plot):
        chapters = sorted(set(real_metrics.keys()) | set(syn_metrics.keys()))
        real_values = [real_metrics.get(ch, {}).get(metric, 0) for ch in chapters]
        syn_values = [syn_metrics.get(ch, {}).get(metric, 0) for ch in chapters]

        pct_diffs = []
        colors = []
        for real_val, syn_val in zip(real_values, syn_values):
            if real_val > 0:
                pct_diff = ((syn_val - real_val) / real_val) * 100
                colors.append(increase_color if pct_diff > 0 else decrease_color)
            else:
                # No real-world baseline: treat any synthetic activity as +100%.
                pct_diff = 100 if syn_val > 0 else 0
                colors.append(increase_color if syn_val > 0 else neutral_color)
            pct_diffs.append(pct_diff)

        fig = go.Figure()
        fig.add_trace(
            go.Bar(
                x=chapters,
                y=pct_diffs,
                marker=dict(color=colors, line=dict(color='#2c3e50', width=0.5)),
                name=ylabel,
                showlegend=False,
                text=[f'{val:+.1f}%' for val in pct_diffs],
                textposition='auto',
                hovertemplate='%{x}<br>%{y:.1f}%',
                width=0.6
            )
        )
        fig.update_layout(
            title=dict(
                text=ylabel,
                x=0.5,
                xanchor='center',
                font=dict(size=20)
            ),
            font=dict(size=14),
            margin=dict(l=50, r=40, t=60, b=50),
            hovermode="x unified",
            bargap=0.25,
            template='plotly'
        )
        fig.update_xaxes(
            title_text='Topic',
            title_font=dict(size=16),
            tickvals=chapters,
            ticktext=[f'{ch}' for ch in chapters],
            tickfont=dict(size=13),
            showgrid=False,
            zeroline=False
        )
        fig.update_yaxes(
            title_text='Change (%)',
            title_font=dict(size=16),
            tickfont=dict(size=13),
            zeroline=True,
            zerolinecolor='white',
            zerolinewidth=1.5,
            showgrid=True,
        )
        plotly_figures.append(fig)

    col1, col2 = st.columns(2)
    with col1:
        st.caption("Total Visits: Number of interactions made by students for every course material group by Topic")
        st.plotly_chart(plotly_figures[0], use_container_width=True)
    with col2:
        st.caption("Completion Rate: The percentage of course materials that students interacted with within each topic. A value of 100% means the student engaged with every material in that topic at least once.")
        st.plotly_chart(plotly_figures[1], use_container_width=True)
    col3, col4 = st.columns(2)
    with col3:
        st.caption("Quiz Attempts: Number of quiz attempts made by students for every quiz group by Topic")
        st.plotly_chart(plotly_figures[2], use_container_width=True)
    with col4:
        st.caption("Video Views: Number of video views made by students for every video group by Topic")
        st.plotly_chart(plotly_figures[3], use_container_width=True)

    # FIX: the annotated Tuple[Dict, Dict] return was missing entirely.
    return real_metrics, syn_metrics


def add_new_state(world, trajectories, event_data, week_list=range(7, 11), test_ids=None,
                  OUTPUT_DIR='results/whatif/dsp-002', trajectories_each_week=None,
                  trajectories_each_week_pass=None, trajectories_each_week_fail=None,
                  history_whatif_pass=None, history_whatif_fail=None, fail_only=False):
    """Inject a hypothetical content state into the world and simulate students.

    Parameters
    ----------
    world : ClickstreamWorld instance (mutated in place).
    trajectories : training trajectories used to rebuild the transition table.
    event_data : dict with keys 'chapter', 'event_type' ('quiz'/'video'),
        'difficulty', 'duration'.
    week_list : weeks to simulate; with a single week the function returns
        early with that week's results.
    fail_only : when True, restrict the real cohort to predicted-fail students.

    Returns
    -------
    Either (syn_students, real_student, world) for a single week, or a dict
    of per-week lists keyed 'week'/'real_trajectories'/'syn_trajectories'/'world'.
    """
    print("Adding new state with parameters:", event_data)
    results = {
        'week': [],
        'real_trajectories': [],
        'syn_trajectories': [],
        'world': []
    }
    for week in week_list:
        print(f"\nProcessing week {week}")
        world._reset_transition_prob_table()
        world._update_transition_prob_table(trajectories)
        # Base feature matrix; overwritten below once the new state is added.
        add_feature_matrix = world.designed_features(world.values)
        chapter = event_data['chapter']
        # FIX: was `event_data['event_type'] in 'quiz'`, a substring test that
        # would also be True for '', 'q', 'ui', etc. Use strict equality.
        is_problem = event_data['event_type'] == 'quiz'
        value = float(event_data['difficulty']) if is_problem else float(event_data['duration'])
        print(f'Adding event - Topic: {chapter}, Type: {"Quiz" if is_problem else "Video"}, Value: {value}')
        add_feature_matrix = world.add_new_state(chapter=chapter, value=value,
                                                 is_problem=is_problem, predict=True)

        pred = pd.read_csv(f'{OUTPUT_DIR}/predictions_weeks_{week}.csv')
        pred = pred['y_pred']
        real_student = trajectories_each_week[week - 1][test_ids]
        if fail_only:
            # Keep only students the week-level model predicted to fail (label 1).
            real_student = [real_student.iloc[i] for i in range(len(real_student)) if pred[i] == 1]
        else:
            real_student = [real_student.iloc[i] for i in range(len(real_student))]

        syn_students = irl_helper.make_syn_student_personalized(
            trajectories_pass=[trajectories_each_week_pass[week - 1]],
            trajectories_fail=[trajectories_each_week_fail[week - 1]],
            students=[trajectories_each_week[week - 1]],
            history_whatif_pass=[history_whatif_pass[week - 1]],
            history_whatif_fail=[history_whatif_fail[week - 1]],
            feature_matrix=add_feature_matrix,
            world=world,
            num_week=1,
            is_start_state_arr=False,
            test_labels=pred,
            new_state=True,
            test_ids=test_ids,
            fail_only=fail_only
        )
        # make_syn_student_personalized returns one trajectory list per week;
        # num_week=1, so unwrap the single inner element per student.
        syn_students = [syn_students[i][0] for i in range(len(syn_students))]

        if len(week_list) > 1:
            results['week'].append(week)
            results['real_trajectories'].append(real_student)
            results['syn_trajectories'].append(syn_students)
            results['world'].append(world)
        else:
            return syn_students, real_student, world
    return results


def get_top_rewards_per_week(df, top_n=3):
    """Return the `top_n` highest-reward rows for each week.

    Expects columns 'Week' and 'Reward'; returns an empty DataFrame (and shows
    a Streamlit error) when 'Week' is missing.
    """
    if 'Week' not in df.columns:
        st.error(f"DataFrame is missing 'Week' column: {df.columns}")
        return pd.DataFrame()
    df_sorted = df.sort_values(by=['Week', 'Reward'], ascending=[True, False])
    top_n_df = df_sorted.groupby('Week').head(top_n).reset_index(drop=True)
    return top_n_df


def show_highest_rewards(course_id='dsp-002'):
    """Render a per-week leaderboard of the top-3 videos and quizzes by IRL reward."""
    st.header("🏆 Leaderboard: Top Course Material per Week")
    st.markdown("This section displays the top 3 videos and quizzes each week that passing students in previous year find most engaging and motivating.")
    df_video = pd.read_csv(f'results/whatif/{course_id}/sorted_reward_video.csv')
    df_problem = pd.read_csv(f'results/whatif/{course_id}/sorted_reward_problem.csv')
    top_3_video = get_top_rewards_per_week(df_video, top_n=3)
    top_3_problem = get_top_rewards_per_week(df_problem, top_n=3)

    tab_video, tab_problem = st.tabs(["Videos", "Quizzes"])
    with tab_video:
        st.subheader("Top Videos by Week")
        if not top_3_video.empty:
            weeks = sorted(top_3_video['Week'].unique())
            # Weeks are stored 0-based; display 1-based.
            week_tabs = st.tabs([f"Week {w+1}" for w in weeks])
            for week, week_tab in zip(weeks, week_tabs):
                with week_tab:
                    st.markdown(f"**Top Videos - Week {week+1}**")
                    # FIX: .copy() so the column assignment below does not hit
                    # pandas' SettingWithCopyWarning on a boolean-mask slice.
                    week_data = top_3_video[top_3_video['Week'] == week].copy()
                    week_data['duration'] = week_data['duration'].apply(
                        lambda x: f"{int(x)//60} min {int(x)%60} sec")
                    display_cols = ['title', 'chapter', 'subchapter', 'duration', 'date']
                    rename_cols = {'title': 'Name', 'chapter': 'Topic',
                                   'subchapter': 'Sub-Topic', 'duration': 'Duration',
                                   'date': 'Published Date'}
                    display_data = week_data[display_cols].dropna(axis=1, how='all').rename(columns=rename_cols)
                    st.dataframe(display_data, use_container_width=False, hide_index=True)
        else:
            st.info("No video event data available to display top rewards.")

    with tab_problem:
        st.subheader("Top Quizzes by Week")
        if not top_3_problem.empty:
            weeks = sorted(top_3_problem['Week'].unique())
            week_tabs = st.tabs([f"Week {w+1}" for w in weeks])
            for week, week_tab in zip(weeks, week_tabs):
                with week_tab:
                    st.markdown(f"**Top Quizzes - Week {week+1}**")
                    # FIX: .copy() — same SettingWithCopy hazard as the video tab.
                    week_data = top_3_problem[top_3_problem['Week'] == week].copy()
                    week_data['Difficulty'] = week_data['Difficulty'].apply(lambda x: f"{x:.2f}")
                    display_cols = ['title', 'chapter', 'subchapter', 'grade_max', 'Difficulty', 'date']
                    rename_cols = {'title': 'Name', 'chapter': 'Topic',
                                   'subchapter': 'Sub-Topic', 'grade_max': 'Max Grade',
                                   'Difficulty': 'Difficulty Level (0-1)',
                                   'date': 'Published Date'}
                    display_data = week_data[display_cols].dropna(axis=1, how='all').rename(columns=rename_cols)
                    st.dataframe(display_data, use_container_width=False, hide_index=True)
        else:
            st.info("No problem event data available to display top rewards.")


class StateManager:
    """Holds the week-specific LSTM performance model plus the test cohort,
    and delegates what-if state additions to `add_new_state`.

    All keyword arguments are stored and forwarded verbatim to `add_new_state`
    on each `add_state` call.
    """

    def __init__(self, **kwargs):
        self.function_to_add_state = add_new_state
        self._kwargs = kwargs
        # Week to load the checkpoint for; defaults to week 6.
        week = kwargs.get('week_list', [6])[0]
        DATA_DIR = 'results/whatif/dsp-002/'
        # Pre-trained bi-LSTM checkpoints keyed by prediction week.
        model_path = {
            5: 'lstm-bi-32-64-5-1722490972.1859/model.keras_final_e.keras',
            6: 'lstm-bi-32-64-6-1722494926.4949/model.keras_final_e.keras',
            7: 'lstm-bi-32-64-7-1722499225.71723/model.keras_final_e.keras',
            8: 'lstm-bi-32-64-8-1722504182.3553/model.keras_final_e.keras',
            9: 'lstm-bi-32-64-9-1722511435.7777/model.keras_final_e.keras',
            10: 'lstm-bi-32-64-10-1722519098.62673/model.keras_final_e.keras',
        }
        self.reconstructed_model = tf.keras.models.load_model(f'checkpoints/{model_path[week]}')
        self.test_ids = np.load(f'{DATA_DIR}/test_students_5.npy')
        # Restrict the precomputed feature tensor to the held-out students.
        self.x_test = np.load(f'{DATA_DIR}/real-data-early-prediction_dsp-002_1to10_ver2.npy_features.npy')[self.test_ids, :, :]
        print(f"Loaded x_test shape: {self.x_test.shape}")

    def add_state(self, event_data, fail_only=False):
        """Run the what-if simulation with the stored kwargs plus `event_data`."""
        return self.function_to_add_state(**self._kwargs, event_data=event_data, fail_only=fail_only)


def show_metadata():
    """Render the course's topic/skill table and the prerequisite-skill diagram."""
    st.subheader("Course Information")
    st.markdown("""On the left is the skill set for each topic; on the right is a visual representation of the prerequisite skill structure. The taught skills are divided into three categories:
1. Core skills: Fundamental skills that are essential for understanding the course material.
2. Applied skills: Skills that apply the core skills in practical scenarios.
3. Theory-based extension: Advanced skills that build upon the core skills.
""")
    col1, col2 = st.columns([1, 2])
    df = pd.read_csv('streamlit-assets/dsp_prerequisite_skills_2.csv')
    with col1:
        st.subheader("📋 Topic Table")
        st.dataframe(df[['Topic', 'Skills', 'No. Videos', 'No. Quizzes']],
                     use_container_width=False, hide_index=True)
    with col2:
        st.subheader("Prerequisite skill structure")
        fig = Image.open('streamlit-assets/prerequisite_skills_structure.png')
        st.image(fig,
                 caption='**Blue**: Core skills, **Pink**: Applied skills, **Purple**: Theory-based extension',
                 use_container_width=False)


def main():
    """Entry point: load data, build the world model, and drive the UI."""
    st.title("What-if Classroom for Course Design: Digital Signal Processing (DSP)")
    st.markdown(
        """

Description:

\n You are teaching a 10-week Digital Signal Processing (DSP) course on a MOOC platform and have just completed the first 5 weeks. You're now considering whether to add new content in week 6.\n The platform provides access to detailed clickstream data that tracks how students in previous years interacted with videos, quizzes.\n You want to identify which content is most effective in supporting student success. This demo walks you through how to use our simulation model to preview the impact of adding new materials before implementing them in reality. \n """,
        unsafe_allow_html=True
    )
    show_metadata()

    course_id = 'dsp-002'
    datadir = 'data/mooc_raw/coursera'

    # --- Load precomputed trajectories and what-if histories ---
    with open('trajectories_each_week.pkl', 'rb') as f:
        trajectories_each_week = pickle.load(f)
    with open('trajectories_each_week_pass.pkl', 'rb') as f:
        trajectories_each_week_pass = pickle.load(f)
    with open('trajectories_each_week_fail.pkl', 'rb') as f:
        trajectories_each_week_fail = pickle.load(f)
    with open('trajectories.pkl', 'rb') as f:
        trajectories = pickle.load(f)
    with open('history_whatif_fail_200.pkl', 'rb') as f:
        history_whatif_fail = pickle.load(f)
    with open('history_whatif_pass_200.pkl', 'rb') as f:
        history_whatif_pass = pickle.load(f)

    # --- Load course metadata and event/action mappings ---
    combinedg = pd.read_csv(f'data/{course_id}/combinedg_features_{course_id}_processed.csv')
    with open(f'data/{course_id}/dict_event.pkl', 'rb') as f:
        dict_event = pickle.load(f)
    with open(f'data/{course_id}/dict_action.pkl', 'rb') as f:
        dict_action = pickle.load(f)
    with open(f'data/{course_id}/map_week.pkl', 'rb') as f:
        map_week = pickle.load(f)
    with open(f'data/{course_id}/map_event_id.pkl', 'rb') as f:
        map_event_id = pickle.load(f)
    with open(f'data/{course_id}/map_action.pkl', 'rb') as f:
        map_action = pickle.load(f)
    with open(f'data/{course_id}/problem_event.pkl', 'rb') as f:
        problem_event = pickle.load(f)
    with open(f'data/{course_id}/video_event.pkl', 'rb') as f:
        video_event = pickle.load(f)
    schedule = pd.read_csv(f'{datadir}/schedule/{course_id}.csv')
    values = whatif_values(combinedg, schedule, map_event_id=map_event_id,
                           problem_event=problem_event)
    ids = np.load(f'results/whatif/{course_id}//test_students_5.npy')
    ###### Finish loading data ######

    show_highest_rewards()

    world = Env.ClickstreamWorld(trajectories=trajectories, dict_action=dict_action,
                                 dict_event=dict_event, video_arr=video_event,
                                 problem_arr=problem_event, values=values, add_state=True)
    state_manager = StateManager(world=world, trajectories=trajectories, test_ids=ids,
                                 trajectories_each_week=trajectories_each_week,
                                 trajectories_each_week_pass=trajectories_each_week_pass,
                                 trajectories_each_week_fail=trajectories_each_week_fail,
                                 history_whatif_pass=history_whatif_pass,
                                 history_whatif_fail=history_whatif_fail,
                                 week_list=[6])

    performance_col1, performance_col2 = st.columns([1, 2])
    plot_performance(show_image=True, col_image=performance_col1)

    # Baseline (pre-intervention) performance prediction.
    fail_num, pass_num, _ = performance_prediction(state_manager.reconstructed_model,
                                                   state_manager.x_test)
    real_pred_df = pd.DataFrame({
        # FIX: label typo 'Perdicted' -> 'Predicted' (now consistent with the
        # "After Intervention" label below).
        'class': ['Predicted Performance Before Intervention'],
        'fail_num': [fail_num],
        'pass_num': [pass_num],
        'fail_percent': [fail_num / (fail_num + pass_num) * 100],
        'pass_percent': [pass_num / (fail_num + pass_num) * 100]
    })

    # --- Sidebar: intervention controls ---
    st.sidebar.header("Introduce new course content")
    st.sidebar.markdown("Add new course content to see how it affects student performance.")
    chapter = st.sidebar.slider('Topic:', 1, 10, 1)
    event_type = st.sidebar.selectbox('Event Type:', ['quiz', 'video'])
    value_label = 'Difficulty:' if event_type == 'quiz' else 'Duration:'
    value = st.sidebar.slider(value_label, 0.0, 1.0, 0.5, 0.01)
    fail_only = st.sidebar.selectbox('Simulate low-performed students only:', [False, True],
                                     format_func=lambda x: 'Yes' if x else 'No')

    if st.sidebar.button('Add State', key='add_state_button'):
        event_data = {
            'chapter': chapter,
            'event_type': event_type,
            'difficulty': value,
            'duration': value,
            'fail_only': fail_only
        }
        with st.spinner("Analyzing trajectories..."):
            results = state_manager.add_state(event_data, fail_only=fail_only)
            st.session_state.results = results

    # Display the results if they exist (survives Streamlit reruns).
    if 'results' in st.session_state:
        import utils.data_helper as data_helper
        if isinstance(st.session_state.results, dict):
            syn = st.session_state.results['syn_trajectories']
            real = st.session_state.results['real_trajectories']
            world = st.session_state.results['world']
        else:
            syn, real, world = st.session_state.results
        course_features = data_helper.trajectories_to_features(
            syn, world, num_week=1, syn=True, save_to_disk=False
        )
        # NOTE(review): shape[0] is the student axis but the slice is on axis 1
        # (time); this looks like it should be `shape[1] - 100` — confirm
        # against the feature-tensor layout before changing.
        x_test = np.concatenate(
            (state_manager.x_test[:, :(state_manager.x_test.shape[0] - 100), :], course_features),
            axis=1
        )
        fail_num, pass_num, _ = performance_prediction(
            state_manager.reconstructed_model, x_test
        )
        temp_df = pd.DataFrame({
            'class': ['Predicted Performance After Intervention'],
            'fail_num': [fail_num],
            'pass_num': [pass_num],
            'fail_percent': [fail_num / (fail_num + pass_num) * 100],
            'pass_percent': [pass_num / (fail_num + pass_num) * 100]
        })
        merged_df = pd.concat([temp_df, real_pred_df], ignore_index=True)
        st.session_state.performance_df = merged_df
        plot_performance(st.session_state.performance_df, show_performance=True,
                         col_performance=performance_col2)
        compare_chapter_engagement(syn, real, world)


if __name__ == '__main__':
    main()

# NOTE(review): this trailing block duplicates StateManager's checkpoint table
# (with a different DATA_DIR) and is never referenced — candidate for removal.
DATA_DIR = 'checkpoints/'
model_path = {
    5: 'lstm-bi-32-64-5-1722490972.1859/model.keras_final_e.keras',
    6: 'lstm-bi-32-64-6-1722494926.4949/model.keras_final_e.keras',
    7: 'lstm-bi-32-64-7-1722499225.71723/model.keras_final_e.keras',
    8: 'lstm-bi-32-64-8-1722504182.3553/model.keras_final_e.keras',
    9: 'lstm-bi-32-64-9-1722511435.7777/model.keras_final_e.keras',
    10: 'lstm-bi-32-64-10-1722519098.62673/model.keras_final_e.keras',
}