import streamlit as st
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import plotly.express as px
import plotly.graph_objects as go
from sklearn.ensemble import IsolationForest
from sklearn.linear_model import LinearRegression
import random
import calendar

# Set random seed for reproducibility
np.random.seed(42)

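# The synthetic data generated below layers a per-device base load with a
# sinusoidal seasonal factor, fixed peak-hour and weekend multipliers,
# Gaussian noise, and a small share of injected anomalies (~3% of records),
# so the detection and forecasting tabs further down have realistic structure
# to work with.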
def generate_device_data(num_days=90, device_type="home"):
    """Generate synthetic energy consumption data for devices with enhanced patterns"""
    dates = pd.date_range(end=datetime.now(), periods=num_days*24, freq='h')

    if device_type == "home":
        devices = {
            'HVAC': {'base': 8, 'var': 4, 'peak_hours': [14, 15, 16, 17], 'weekend_factor': 1.2},
            'Refrigerator': {'base': 2, 'var': 0.5, 'peak_hours': [12, 13, 14], 'weekend_factor': 1.0},
            'Washing Machine': {'base': 1, 'var': 0.8, 'peak_hours': [10, 19, 20], 'weekend_factor': 1.5},
            'Lighting': {'base': 1.5, 'var': 0.3, 'peak_hours': [18, 19, 20, 21], 'weekend_factor': 1.1},
            'Television': {'base': 0.5, 'var': 0.2, 'peak_hours': [20, 21, 22], 'weekend_factor': 1.3}
        }
    else:
        devices = {
            'HVAC System': {'base': 20, 'var': 8, 'peak_hours': [14, 15, 16, 17], 'weekend_factor': 0.6},
            'Server Room': {'base': 15, 'var': 3, 'peak_hours': [12, 13, 14], 'weekend_factor': 0.9},
            'Office Equipment': {'base': 10, 'var': 4, 'peak_hours': [9, 10, 11, 14, 15], 'weekend_factor': 0.4},
            'Lighting': {'base': 8, 'var': 2, 'peak_hours': [9, 10, 11, 14, 15], 'weekend_factor': 0.5},
            'Kitchen Appliances': {'base': 5, 'var': 2, 'peak_hours': [12, 13], 'weekend_factor': 0.3}
        }

    data = []
    for date in dates:
        hour = date.hour
        is_weekend = date.weekday() >= 5
        for device, params in devices.items():
            # Add seasonal variation
            seasonal_factor = 1 + 0.3 * np.sin(2 * np.pi * date.dayofyear / 365)
            # Add peak hour variation
            peak_factor = 1.5 if hour in params['peak_hours'] else 1
            # Add weekend variation
            weekend_factor = params['weekend_factor'] if is_weekend else 1
            # Base consumption with random variation
            consumption = (params['base'] * seasonal_factor * peak_factor * weekend_factor +
                           np.random.normal(0, params['var']))
            # Add some anomalies (3% chance)
            if np.random.random() < 0.03:
                consumption *= np.random.choice([1.5, 2.0, 0.5])
            data.append({
                'Date': date,
                'Device': device,
                'Consumption': max(0, consumption),
                'Hour': hour,
                'Weekday': date.strftime('%A'),
                'Weekend': is_weekend
            })
    return pd.DataFrame(data)

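# Anomaly detection fits a separate Isolation Forest per device on consumption,
# hour of day, and a weekend flag. The contamination parameter (3%) mirrors the
# injection rate used in generate_device_data, so roughly that share of each
# device's records ends up flagged.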
def detect_anomalies(df):
    """Enhanced anomaly detection using Isolation Forest with multiple features"""
    iso_forest = IsolationForest(contamination=0.03, random_state=42)
    by_device = df.groupby('Device')
    anomalies = []
    for device, group in by_device:
        # Use multiple features for anomaly detection
        features = group[['Consumption', 'Hour']].copy()
        features['Weekend'] = group['Weekend'].astype(int)
        predictions = iso_forest.fit_predict(features)
        anomaly_indices = predictions == -1
        anomaly_data = group[anomaly_indices]
        for _, row in anomaly_data.iterrows():
            anomalies.append({
                'Device': device,
                'Date': row['Date'],
                'Consumption': row['Consumption'],
                'Hour': row['Hour'],
                'Weekday': row['Weekday']
            })
    return pd.DataFrame(anomalies)

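# Insights summarize two patterns per device: its three highest-consumption
# hours, and how its weekend average compares to weekdays (differences of 20%
# or more are labelled high impact).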
def generate_insights(df):
    """Generate detailed insights from the energy consumption data"""
    insights = []

    # Peak usage analysis
    peak_hours = df.groupby(['Device', 'Hour'])['Consumption'].mean().reset_index()
    for device in df['Device'].unique():
        device_peaks = peak_hours[peak_hours['Device'] == device].nlargest(3, 'Consumption')
        insights.append({
            'Type': 'Peak Hours',
            'Device': device,
            'Description': f"Peak usage hours: {', '.join(map(str, device_peaks['Hour']))}",
            'Impact': 'High'
        })

    # Weekend vs Weekday analysis
    weekend_comparison = df.groupby(['Device', 'Weekend'])['Consumption'].mean().unstack()
    for device in weekend_comparison.index:
        diff_pct = ((weekend_comparison.loc[device, True] - weekend_comparison.loc[device, False]) /
                    weekend_comparison.loc[device, False] * 100)
        insights.append({
            'Type': 'Weekend Pattern',
            'Device': device,
            'Description': f"{'Higher' if diff_pct > 0 else 'Lower'} weekend usage by {abs(diff_pct):.1f}%",
            'Impact': 'Medium' if abs(diff_pct) < 20 else 'High'
        })
    return pd.DataFrame(insights)

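# Forecasting fits one linear regression per device on calendar features
# (hour, day of week, month, day of year). A purely linear model cannot
# reproduce the sinusoidal seasonality or the sharp peak-hour jumps in the
# synthetic data, so the forecast should be read as a rough trend rather than
# an accurate hourly estimate.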
def predict_consumption(df, days_ahead=30):
    """Predict future consumption using linear regression with multiple features"""
    predictions = []
    for device in df['Device'].unique():
        device_data = df[df['Device'] == device].copy()

        # Create features for prediction
        device_data['Day_of_Week'] = device_data['Date'].dt.dayofweek
        device_data['Month'] = device_data['Date'].dt.month
        device_data['Day_of_Year'] = device_data['Date'].dt.dayofyear

        X = device_data[['Hour', 'Day_of_Week', 'Month', 'Day_of_Year']]
        y = device_data['Consumption']

        model = LinearRegression()
        model.fit(X, y)

        # Generate future dates
        future_dates = pd.date_range(
            start=df['Date'].max() + timedelta(hours=1),
            periods=days_ahead*24,
            freq='h'
        )
        future_X = pd.DataFrame({
            'Hour': future_dates.hour,
            'Day_of_Week': future_dates.dayofweek,
            'Month': future_dates.month,
            'Day_of_Year': future_dates.dayofyear
        })
        future_predictions = model.predict(future_X)
        for date, pred in zip(future_dates, future_predictions):
            predictions.append({
                'Date': date,
                'Device': device,
                'Predicted_Consumption': max(0, pred)
            })
    return pd.DataFrame(predictions)

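# Streamlit reruns this script on every widget interaction, so the synthetic
# data and all downstream models are regenerated each time. Wrapping
# generate_device_data and predict_consumption in st.cache_data would avoid the
# repeated work; that is left here as an optional optimization.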
# Streamlit UI
st.set_page_config(page_title="SEMS - Smart Energy Management System", layout="wide", initial_sidebar_state="expanded")

# Custom CSS
st.markdown("""
    <style>
    .main {
        padding: 2rem;
    }
    .stMetric {
        background-color: #f0f2f6;
        padding: 1rem;
        border-radius: 0.5rem;
    }
    .insight-card {
        background-color: #ffffff;
        padding: 1rem;
        border-radius: 0.5rem;
        margin: 0.5rem 0;
        border: 1px solid #e0e0e0;
    }
    </style>
""", unsafe_allow_html=True)

st.title("🏢 SEMS - Smart Energy Management System")

# Sidebar configuration
st.sidebar.title("Configuration")
user_type = st.sidebar.radio("Select User Type", ["Home", "Organization"])
analysis_period = st.sidebar.slider("Analysis Period (Days)", 30, 180, 90)

# Generate data
data = generate_device_data(num_days=analysis_period, device_type=user_type.lower())

# Main tabs
tab1, tab2, tab3, tab4 = st.tabs([
    "📊 Usage Dashboard",
    "🔍 Detailed Analysis",
    "⚠️ Peak Usage Detection",
    "📈 Forecasting"
])

with tab1:
    st.header("Energy Usage Dashboard")

    # Key metrics
    col1, col2, col3 = st.columns(3)
    total_consumption = data['Consumption'].sum()
    avg_daily = data.groupby(data['Date'].dt.date)['Consumption'].sum().mean()
    peak_hour = data.groupby('Hour')['Consumption'].mean().idxmax()
    col1.metric("Total Consumption", f"{total_consumption:.1f} kWh")
    col2.metric("Average Daily Usage", f"{avg_daily:.1f} kWh")
    col3.metric("Peak Usage Hour", f"{peak_hour}:00")

    # Daily consumption trend
    st.subheader("Daily Consumption Trend")
    # Aggregate the hourly records to daily totals per device so the chart
    # actually shows a daily trend
    daily_consumption = (data.groupby([data['Date'].dt.date, 'Device'])['Consumption']
                         .sum().reset_index())
    fig = px.line(daily_consumption, x='Date', y='Consumption', color='Device',
                  title='Energy Consumption Over Time')
    fig.update_layout(height=400)
    st.plotly_chart(fig, use_container_width=True)

    # Device-wise distribution
    col1, col2 = st.columns(2)
    with col1:
        device_total = data.groupby('Device')['Consumption'].sum().sort_values(ascending=True)
        fig = px.bar(device_total, orientation='h',
                     title='Total Consumption by Device')
        st.plotly_chart(fig, use_container_width=True)
    with col2:
        hourly_avg = data.groupby(['Hour', 'Device'])['Consumption'].mean().reset_index()
        fig = px.line(hourly_avg, x='Hour', y='Consumption', color='Device',
                      title='Average Hourly Consumption Pattern')
        st.plotly_chart(fig, use_container_width=True)

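# The Detailed Analysis tab breaks consumption down by day of week and by
# hour-of-day versus weekday (a pivot_table rendered as a heatmap), then lists
# the per-device insights produced by generate_insights.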
with tab2:
    st.header("Detailed Analysis")

    # Weekday vs Weekend analysis
    st.subheader("Weekday vs Weekend Consumption")
    weekly_pattern = data.groupby(['Weekday', 'Device'])['Consumption'].mean().reset_index()
    fig = px.bar(weekly_pattern, x='Weekday', y='Consumption', color='Device',
                 title='Average Consumption by Day of Week')
    st.plotly_chart(fig, use_container_width=True)

    # Hourly heatmap
    st.subheader("Hourly Consumption Heatmap")
    hourly_data = data.pivot_table(
        values='Consumption',
        index='Hour',
        columns='Weekday',
        aggfunc='mean'
    )
    fig = px.imshow(hourly_data,
                    labels=dict(x="Day of Week", y="Hour of Day", color="Consumption"),
                    aspect="auto",
                    title="Consumption Intensity by Hour and Day")
    st.plotly_chart(fig, use_container_width=True)

    # Display insights
    st.subheader("Key Insights")
    insights = generate_insights(data)
    for _, insight in insights.iterrows():
        with st.expander(f"{insight['Device']} - {insight['Type']} (Impact: {insight['Impact']})"):
            st.write(insight['Description'])

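# The Peak Usage Detection tab overlays the points flagged by detect_anomalies
# on each device's consumption series and lists them in per-device tables.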
with tab3:
    st.header("Peak Usage Detection")

    # Detect anomalies
    anomalies = detect_anomalies(data)
    # The plot and detail tables only make sense (and only run safely) when
    # anomalies were actually found
    if not anomalies.empty:
        st.warning(f"Detected {len(anomalies)} anomalies in energy consumption")

        # Plot with anomalies
        fig = go.Figure()
        for device in data['Device'].unique():
            device_data = data[data['Device'] == device]
            device_anomalies = anomalies[anomalies['Device'] == device]
            fig.add_trace(go.Scatter(
                x=device_data['Date'],
                y=device_data['Consumption'],
                name=f"{device} (normal)",
                mode='lines'
            ))
            if not device_anomalies.empty:
                fig.add_trace(go.Scatter(
                    x=device_anomalies['Date'],
                    y=device_anomalies['Consumption'],
                    name=f"{device} (anomaly)",
                    mode='markers',
                    marker=dict(size=10, symbol='x', color='red')
                ))
        fig.update_layout(
            title='Energy Consumption with Detected Anomalies',
            height=500
        )
        st.plotly_chart(fig, use_container_width=True)

        # Anomaly details in an expandable table
        st.subheader("Peak Usage Details")
        for device in anomalies['Device'].unique():
            device_anomalies = anomalies[anomalies['Device'] == device].copy()
            device_anomalies['Date'] = device_anomalies['Date'].dt.strftime('%Y-%m-%d %H:%M')
            with st.expander(f"Anomalies for {device}"):
                st.dataframe(
                    device_anomalies[['Date', 'Consumption', 'Hour', 'Weekday']],
                    use_container_width=True
                )

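# The Forecasting tab shows, per device, the historical series next to the
# 30-day regression forecast, plus a comparison of average historical versus
# predicted usage.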
with tab4:
    st.header("Consumption Forecasting")

    # Generate predictions
    predictions = predict_consumption(data)

    # Plot historical data and predictions
    st.subheader("Consumption Forecast")
    for device in predictions['Device'].unique():
        with st.expander(f"Forecast for {device}"):
            historical = data[data['Device'] == device]
            device_predictions = predictions[predictions['Device'] == device]

            fig = go.Figure()
            # Historical data
            fig.add_trace(go.Scatter(
                x=historical['Date'],
                y=historical['Consumption'],
                name='Historical',
                line=dict(color='blue')
            ))
            # Predictions
            fig.add_trace(go.Scatter(
                x=device_predictions['Date'],
                y=device_predictions['Predicted_Consumption'],
                name='Forecast',
                line=dict(color='red', dash='dash')
            ))
            fig.update_layout(
                title=f'Energy Consumption Forecast - {device}',
                xaxis_title='Date',
                yaxis_title='Consumption (kWh)',
                height=400
            )
            st.plotly_chart(fig, use_container_width=True)

            # Summary statistics
            col1, col2, col3 = st.columns(3)
            avg_historical = historical['Consumption'].mean()
            avg_predicted = device_predictions['Predicted_Consumption'].mean()
            change_pct = (avg_predicted - avg_historical) / avg_historical * 100
            col1.metric(
                "Average Historical Usage",
                f"{avg_historical:.2f} kWh"
            )
            col2.metric(
                "Average Predicted Usage",
                f"{avg_predicted:.2f} kWh"
            )
            col3.metric(
                "Expected Change",
                f"{change_pct:+.1f}%",
                delta_color="inverse"
            )

    # Additional insights section
    st.subheader("Energy Saving Opportunities")

    # Calculate potential savings based on patterns
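    # The savings figures below are simple heuristics rather than measured
    # values: a 20% reduction is assumed for the three highest-consumption
    # hours, the weekend estimate takes the weekend/weekday gap in average
    # consumption over two days, and a seasonal opportunity is flagged when
    # monthly averages vary by more than 30% of the overall mean.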
    def calculate_savings_opportunities(historical_data, predictions_data):
        opportunities = []

        # Check for peak hour reduction potential
        peak_hours = historical_data.groupby('Hour')['Consumption'].mean()
        top_peak_hours = peak_hours.nlargest(3)
        potential_peak_savings = top_peak_hours.sum() * 0.2  # Assume 20% reduction possible
        opportunities.append({
            'Type': 'Peak Hour Reduction',
            'Description': f'Reduce usage during peak hours ({", ".join(map(str, top_peak_hours.index))}:00)',
            'Potential_Savings': f'{potential_peak_savings:.2f} kWh per day'
        })

        # Check for weekend optimization
        weekend_data = historical_data[historical_data['Weekend']]
        weekday_data = historical_data[~historical_data['Weekend']]
        if weekend_data['Consumption'].mean() > weekday_data['Consumption'].mean():
            weekend_savings = (weekend_data['Consumption'].mean() - weekday_data['Consumption'].mean()) * 2
            opportunities.append({
                'Type': 'Weekend Optimization',
                'Description': 'Optimize weekend consumption patterns',
                'Potential_Savings': f'{weekend_savings:.2f} kWh per weekend'
            })

        # Seasonal optimization
        seasonal_data = historical_data.copy()
        seasonal_data['Month'] = seasonal_data['Date'].dt.month
        monthly_avg = seasonal_data.groupby('Month')['Consumption'].mean()
        seasonal_variation = monthly_avg.max() - monthly_avg.min()
        if seasonal_variation > monthly_avg.mean() * 0.3:  # If variation is more than 30%
            opportunities.append({
                'Type': 'Seasonal Optimization',
                'Description': 'Implement seasonal usage strategies',
                'Potential_Savings': f'{seasonal_variation:.2f} kWh per month'
            })
        return pd.DataFrame(opportunities)

    savings_opportunities = calculate_savings_opportunities(data, predictions)
    for _, opportunity in savings_opportunities.iterrows():
        with st.expander(f"💡 {opportunity['Type']}"):
            st.write(f"**Description:** {opportunity['Description']}")
            st.write(f"**Potential Savings:** {opportunity['Potential_Savings']}")

            # Add specific recommendations based on opportunity type
            if opportunity['Type'] == 'Peak Hour Reduction':
                st.write("""
                **Recommendations:**
                - Schedule high-energy activities during off-peak hours
                - Use automated controls to limit non-essential usage during peak times
                - Consider energy storage solutions for peak shifting
                """)
            elif opportunity['Type'] == 'Weekend Optimization':
                st.write("""
                **Recommendations:**
                - Review weekend device scheduling
                - Implement automatic shutdown for unused equipment
                - Optimize temperature settings for unoccupied periods
                """)
            elif opportunity['Type'] == 'Seasonal Optimization':
                st.write("""
                **Recommendations:**
                - Adjust HVAC settings seasonally
                - Implement weather-based control strategies
                - Schedule maintenance during shoulder seasons
                """)

# Add export functionality
if st.sidebar.button("Export Analysis Report"):
    # Create report dataframe
    # Note: anomalies and change_pct come from the tabs above; change_pct holds
    # the value for the last device processed in the forecasting loop
    report_data = {
        'Metric': [
            'Total Consumption',
            'Average Daily Usage',
            'Peak Usage Hour',
            'Number of Anomalies',
            'Forecast Trend'
        ],
        'Value': [
            f"{total_consumption:.1f} kWh",
            f"{avg_daily:.1f} kWh",
            f"{peak_hour}:00",
            len(anomalies),
            f"{change_pct:+.1f}% (30-day forecast)"
        ]
    }
    report_df = pd.DataFrame(report_data)

    # Convert to CSV
    csv = report_df.to_csv(index=False)
    st.sidebar.download_button(
        label="Download Report",
        data=csv,
        file_name="energy_analysis_report.csv",
        mime="text/csv"
    )

# Add help section in sidebar
with st.sidebar.expander("ℹ️ Help"):
    st.write("""
    **Using the Dashboard:**
    1. Select your user type (Home/Organization)
    2. Adjust the analysis period using the slider
    3. Navigate through tabs to view different analyses
    4. Use expanders to see detailed information
    5. Export your analysis report using the button above

    For additional support, contact our team at support@sems.com
    """)

# Add system status
st.sidebar.markdown("---")
st.sidebar.markdown("### System Status")
st.sidebar.markdown("✅ All Systems Operational")
st.sidebar.markdown(f"Last Updated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")