# NOTE: Hugging Face Spaces page header ("Spaces: Sleeping") captured by the
# extraction — kept as a comment so this file remains valid Python.
import streamlit as st

# Ensure task-creation flags exist before any imports that might read them.
# (This is why streamlit is imported ahead of the project modules below.)
if 'show_success' not in st.session_state:
    st.session_state.show_success = False
if 'last_task_key' not in st.session_state:
    st.session_state.last_task_key = None
if 'last_task_url' not in st.session_state:
    st.session_state.last_task_url = None

# Third-party and project imports (project modules may read the flags above
# at import time — presumably via module-level code; TODO confirm).
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from second import double_main
from multiple import multiple_main
from multiple import display_story_points_stats
from jira_integration import render_jira_login, JIRA_SERVER
from weekly import generate_weekly_report
from pre import preprocess_uploaded_file, add_app_description
from multi_env_compare import multi_env_compare_main
import multiple_env_loader
def single_main(uploaded_file):
    """Render the report view for a single uploaded test-results file.

    Shows failed/passed scenario counts, a per-functional-area detail table,
    the average time spent per functional area (sorted by earliest start),
    and — for failed scenarios — a bar graph of failures per area.

    Args:
        uploaded_file: A Streamlit ``UploadedFile`` (CSV/XLSX) as produced by
            ``st.file_uploader``, or ``None``. When ``None`` nothing is drawn.

    Returns:
        None. All output is rendered via Streamlit.
    """
    if uploaded_file is None:
        return

    # Process the file with header
    data = preprocess_uploaded_file(uploaded_file)

    # Display debugging information
    st.write("Data shape:", data.shape)
    st.write("Unique functional areas:", data['Functional area'].nunique())
    st.write("Sample of data:", data.head())

    # Split scenarios by terminal status.
    failed_scenarios = data[data['Status'] == 'FAILED']
    passed_scenarios = data[data['Status'] == 'PASSED']

    # Display total counts of failures and passes.
    st.markdown(f"Failing scenarios Count: {len(failed_scenarios)}")
    st.markdown(f"Passing scenarios Count: {len(passed_scenarios)}")

    # Use radio buttons for selecting status (radio always returns one of these
    # two values, so no third branch is needed).
    selected_status = st.radio("Select a status", ['Failed', 'Passed'])
    if selected_status == 'Failed':
        selected_scenarios = failed_scenarios
    else:
        selected_scenarios = passed_scenarios
    unique_areas = np.append(selected_scenarios['Functional area'].unique(), "All")

    st.markdown(f"### Scenarios with status '{selected_status}' grouped by functional area:")

    # Select a range of functional areas to filter scenarios; "All" short-circuits.
    selected_functional_areas = st.multiselect("Select functional areas", unique_areas, ["All"])
    if not selected_functional_areas:  # empty selection
        st.error("Please select at least one functional area.")
        return
    if "All" in selected_functional_areas:
        filtered_scenarios = selected_scenarios
    else:
        filtered_scenarios = selected_scenarios[
            selected_scenarios['Functional area'].isin(selected_functional_areas)
        ]

    st.write(f"Number of filtered scenarios: {len(filtered_scenarios)}")

    # Average time spent per functional area, converted from seconds to MM:SS.
    # NOTE(review): strftime('%M:%S') wraps at one hour — assumes per-area
    # averages stay under 60 minutes; TODO confirm.
    average_time_spent_seconds = (
        filtered_scenarios.groupby('Functional area')['Time spent'].mean().reset_index()
    )
    average_time_spent_seconds['Time spent'] = pd.to_datetime(
        average_time_spent_seconds['Time spent'], unit='s'
    ).dt.strftime('%M:%S')

    # Earliest start per area, used later to order the summary chronologically.
    start_datetime_group = (
        filtered_scenarios.groupby('Functional area')['Start datetime'].min().reset_index()
    )
    average_time_spent_seconds = average_time_spent_seconds.merge(
        start_datetime_group, on='Functional area'
    )

    # Choose detail columns per status; 'Failed Step' is optional in the input.
    if selected_status == 'Failed':
        detail_columns = ['Scenario Name', 'Error Message', 'Time spent(m:s)']
        if 'Failed Step' in filtered_scenarios.columns:
            detail_columns.insert(2, 'Failed Step')
    else:
        detail_columns = ['Scenario Name', 'Time spent(m:s)']

    grouped_filtered_scenarios = filtered_scenarios.groupby('Functional area')[
        detail_columns
    ].apply(lambda x: x.reset_index(drop=True))
    grouped_filtered_scenarios.reset_index(inplace=True)
    # groupby().apply() leaves a 'level_1' column on some pandas versions.
    if 'level_1' in grouped_filtered_scenarios.columns:
        grouped_filtered_scenarios.drop(columns=['level_1'], inplace=True)
    grouped_filtered_scenarios.index = grouped_filtered_scenarios.index + 1  # 1-based display
    st.dataframe(grouped_filtered_scenarios)

    # Sort the average-time table by start datetime and display it 1-indexed.
    average_time_spent_seconds = average_time_spent_seconds.sort_values(by='Start datetime')
    st.markdown("### Average Time Spent on Each Functional Area")
    average_time_spent_seconds.index = average_time_spent_seconds.index + 1
    st.dataframe(average_time_spent_seconds)

    # Bar graph only for 'Failed' status, and only for manageable sizes.
    if selected_status != 'Passed' and len(grouped_filtered_scenarios) <= 400:
        st.write(f"### Bar graph showing number of '{selected_status}' scenarios in each functional area:")
        error_counts = grouped_filtered_scenarios['Functional area'].value_counts()
        if error_counts.empty:
            st.info(f"No '{selected_status}' scenarios found to display in the graph.")
            return
        plt.figure(figsize=(10, 6))
        plt.bar(error_counts.index, error_counts.values)
        plt.xlabel('Functional Area')
        plt.ylabel('Number of Failures')
        plt.title(f"Number of '{selected_status}' scenarios by Functional Area")
        plt.xticks(rotation=45, ha='right')
        # Integer y-axis with an interval of 1 for consistent reading.
        y_max = max(error_counts.values) + 1
        plt.ylim(0, y_max)
        plt.yticks(range(0, y_max, 1))
        # Annotate each bar with its count.
        for i, count in enumerate(error_counts.values):
            plt.text(i, count, str(count), ha='center', va='bottom')
        plt.tight_layout()  # adjust layout so rotated labels fit
        st.pyplot(plt)
def _render_compare_mode():
    """Compare mode: collect two result files (via one or two uploaders) and diff them."""
    st.sidebar.markdown("### Upload Files for Comparison")
    upload_option = st.sidebar.radio(
        "Upload method",
        ["Single uploader", "Two separate uploaders"],
        key="compare_upload_method",
    )
    if upload_option == "Single uploader":
        uploaded_files = st.sidebar.file_uploader(
            "Upload CSV or XLSX files for comparison",
            type=["csv", "xlsx"],
            accept_multiple_files=True,
        )
        if uploaded_files:
            if len(uploaded_files) < 2:
                st.warning("Please upload at least two files for comparison.")
            else:
                if len(uploaded_files) > 2:
                    # BUGFIX: the warning promised the first two files would be
                    # used, but the comparison was never run in this case.
                    st.warning("More than two files uploaded. Only the first two will be used for comparison.")
                with st.spinner('Processing...'):
                    double_main(uploaded_files[0], uploaded_files[1])
                st.success('Comparison Complete!')
    else:
        col1, col2 = st.sidebar.columns(2)
        with col1:
            uploaded_file1 = st.file_uploader("Upload older CSV/XLSX file", type=["csv", "xlsx"], key="file1")
        with col2:
            uploaded_file2 = st.file_uploader("Upload newer CSV/XLSX file", type=["csv", "xlsx"], key="file2")
        if uploaded_file1 is not None and uploaded_file2 is not None:
            with st.spinner('Processing...'):
                double_main(uploaded_file1, uploaded_file2)
            st.success('Comparison Complete!')
        elif uploaded_file1 is not None or uploaded_file2 is not None:
            st.warning("Please upload both files for comparison.")


def main():
    """App entry point.

    Renders the sidebar (mode selector, optional Jira login, sprint progress)
    and dispatches to the view function for the selected mode.
    """
    add_app_description()

    # Centralized session_state defaults (task flags, Jira, sprint cache, mode).
    _session_defaults = {
        'show_success': False,
        'last_task_key': None,
        'last_task_url': None,
        'jira_server': JIRA_SERVER,
        'is_authenticated': False,  # start as not authenticated
        'jira_client': None,
        'sprint_data_initialized': False,
        'force_sprint_refresh': False,
        'sprint_data_cache': None,
        'last_sprint_fetch': None,
        'mode': 'multi',
        'selected_mode': 'Multi',
    }
    for key, default in _session_defaults.items():
        if key not in st.session_state:
            st.session_state[key] = default

    mode_options = ["Multi", "Compare", "Weekly", "Multi-Env Compare", "Auto Environment Loader"]

    # --- Sidebar: mode selection and Jira login ---
    with st.sidebar:
        selected_mode = st.selectbox(
            "Select Mode",
            mode_options,
            index=mode_options.index(st.session_state.get("selected_mode", "Multi")),
        )
        st.session_state["selected_mode"] = selected_mode
        st.session_state["mode"] = selected_mode.lower()
        st.markdown(f'## Current mode: {st.session_state["mode"].title()} mode')
        st.markdown("---")  # Separator

        # Jira login — render_jira_login() itself updates
        # st.session_state.is_authenticated and st.session_state.jira_client.
        with st.expander("Jira Integration (Optional)", expanded=True):
            render_jira_login()

    # Treat any existing jira_client as authenticated.
    # (An unused `jira_authenticated` local was removed here.)
    if st.session_state.get('jira_client') is not None:
        st.session_state.is_authenticated = True

    # --- Sidebar: sprint progress (only when Jira is connected) ---
    with st.sidebar:
        if st.session_state.get('jira_client') is not None:
            st.markdown("---")  # Separator between integrations
            with st.expander("Sprint Progress", expanded=True):
                if st.button("🔄 Refresh Sprint Data", key="refresh_sprint_sidebar_app"):
                    st.session_state.force_sprint_refresh = True
                # display handles its own caching; pass the manual-refresh flag
                # and reset it after use.
                display_story_points_stats(force_refresh=st.session_state.force_sprint_refresh)
                st.session_state.force_sprint_refresh = False
        else:
            print("DEBUG: Sprint Progress not showing - jira_client is None")

    # --- Main page content based on mode ---
    mode = st.session_state["mode"]
    if mode == "multi":
        multiple_main()
    elif mode == "compare":
        _render_compare_mode()
    elif mode == "weekly":
        uploaded_files = st.sidebar.file_uploader(
            "Upload CSV or XLSX files for Weekly Report",
            type=["csv", "xlsx"],
            accept_multiple_files=True,
        )
        if uploaded_files:
            generate_weekly_report(uploaded_files)
    elif mode == "multi-env compare":
        multi_env_compare_main()
    elif mode == "auto environment loader":
        # Launch the auto environment loader workflow
        multiple_env_loader.main()
# Script entry point: run the Streamlit app when executed directly.
if __name__ == "__main__":
    main()