import asyncio
import json
import os
import random
import re
import tempfile
import time
import uuid
from datetime import datetime
from os.path import join

import pandas as pd
import streamlit as st
from datasets import load_dataset, get_dataset_config_info, Dataset
from dotenv import load_dotenv
from huggingface_hub import HfApi
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from PIL import Image
from streamlit_feedback import streamlit_feedback

from src import (
    preprocess_and_load_df,
    get_from_user,
    ask_question,
)
| |
|
| | |
# Streamlit script threads may lack an event loop; some async client
# libraries require one to exist, so create and register one if missing.
try:
    asyncio.get_running_loop()
except RuntimeError:
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

# Page configuration — must be the first Streamlit command executed.
st.set_page_config(
    page_title="VayuChat - AI Air Quality Assistant",
    page_icon="V",
    layout="wide",
    initial_sidebar_state="expanded",
)
| |
|
| | |
# Message-bubble styles injected early so chat markup rendered below
# always has these classes available.
_BASE_MESSAGE_CSS = """
<style>
/* User message styling - MUST be defined early */
.user-message {
    background: #3b82f6 !important;
    color: white !important;
    padding: 0.75rem 1rem !important;
    border-radius: 7px !important;
    max-width: 95% !important;
}

/* Assistant message styling */
.assistant-message {
    background: #f1f5f9 !important;
    color: #334155 !important;
    padding: 0.75rem 1rem !important;
    border-radius: 12px !important;
    max-width: 85% !important;
}

.assistant-info {
    font-size: 0.875rem !important;
    color: #6b7280 !important;
    margin-bottom: 5px !important;
}
</style>
"""
st.markdown(_BASE_MESSAGE_CSS, unsafe_allow_html=True)
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
# Load environment variables; .env values override anything already set.
load_dotenv(override=True)

# API credentials for the two LLM providers and the Hugging Face Hub.
Groq_Token = os.getenv("GROQ_API_KEY")
hf_token = os.getenv("HF_TOKEN")
gemini_token = os.getenv("GEMINI_TOKEN")

# Display name -> provider-side model identifier.
models = {
    "gpt-oss-120b": "openai/gpt-oss-120b",
    "qwen3-32b": "qwen/qwen3-32b",
    "gpt-oss-20b": "openai/gpt-oss-20b",
    "llama4 maverik": "meta-llama/llama-4-maverick-17b-128e-instruct",
    "llama3.3": "llama-3.3-70b-versatile",
    "deepseek-R1": "deepseek-r1-distill-llama-70b",
    "gemini-2.5-flash": "gemini-2.5-flash",
    "gemini-2.5-pro": "gemini-2.5-pro",
    "gemini-2.5-flash-lite": "gemini-2.5-flash-lite",
    "gemini-2.0-flash": "gemini-2.0-flash",
    "gemini-2.0-flash-lite": "gemini-2.0-flash-lite",
}

# Directory containing this script; data files are resolved relative to it.
self_path = os.path.dirname(os.path.abspath(__file__))

# One stable id per browser session, used to correlate feedback records.
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())
| |
|
def upload_feedback(feedback, error, output, last_prompt, code, status):
    """Upload a feedback report (and any generated plot) to the HF dataset repo.

    Args:
        feedback: dict with optional "score" and "text" keys (user rating).
        error: error message from the assistant turn, or None/empty.
        output: the assistant's content; a file path when a plot was produced.
        last_prompt: the user prompt that produced this response.
        code: the generated analysis code, or None/empty.
        status: dict with an "is_image" flag describing the output type.

    Returns:
        True when the report was uploaded, False otherwise (missing token or
        any upload error — errors are reported via st.error, never raised).
    """
    markdown_local_path = None
    try:
        if not hf_token or hf_token.strip() == "":
            st.warning("Cannot upload feedback - HF_TOKEN not available")
            return False

        feedback_data = {
            "timestamp": datetime.now().isoformat(),
            "session_id": st.session_state.session_id,
            "feedback_score": feedback.get("score", ""),
            "feedback_comment": feedback.get("text", ""),
            "user_prompt": last_prompt,
            "ai_output": str(output),
            "generated_code": code or "",
            "error_message": error or "",
            "is_image_output": status.get("is_image", False),
            "success": not bool(error),
        }

        # Unique folder/file name: timestamp + short random suffix avoids
        # collisions when two sessions submit in the same second.
        timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
        random_id = str(uuid.uuid4())[:8]
        folder_name = f"feedback_{timestamp_str}_{random_id}"

        markdown_content = f"""# VayuChat Feedback Report

## Session Information
- **Timestamp**: {feedback_data['timestamp']}
- **Session ID**: {feedback_data['session_id']}

## User Interaction
**Prompt**: {feedback_data['user_prompt']}

## AI Response
**Output**: {feedback_data['ai_output']}

## Generated Code
```python
{feedback_data['generated_code']}
```

## Technical Details
- **Error Message**: {feedback_data['error_message']}
- **Is Image Output**: {feedback_data['is_image_output']}
- **Success**: {feedback_data['success']}

## User Feedback
- **Score**: {feedback_data['feedback_score']}
- **Comments**: {feedback_data['feedback_comment']}
"""

        markdown_filename = f"{folder_name}.md"
        # Use the OS temp directory instead of a hard-coded "/tmp" so this
        # also works on Windows.
        markdown_local_path = os.path.join(tempfile.gettempdir(), markdown_filename)

        with open(markdown_local_path, "w", encoding="utf-8") as f:
            f.write(markdown_content)

        api = HfApi(token=hf_token)

        api.upload_file(
            path_or_fileobj=markdown_local_path,
            path_in_repo=f"data/{markdown_filename}",
            repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
            repo_type="dataset",
        )

        # Best-effort plot upload: the report above is already stored, so a
        # failure here is only logged, not surfaced as an overall failure.
        if status.get("is_image", False) and isinstance(output, str) and os.path.exists(output):
            try:
                image_filename = f"{folder_name}_plot.png"
                api.upload_file(
                    path_or_fileobj=output,
                    path_in_repo=f"data/{image_filename}",
                    repo_id="SustainabilityLabIITGN/VayuChat_Feedback",
                    repo_type="dataset",
                )
            except Exception as img_error:
                print(f"Error uploading image: {img_error}")

        st.success("Feedback uploaded successfully!")
        return True

    except Exception as e:
        st.error(f"Error uploading feedback: {e}")
        print(f"Feedback upload error: {e}")
        return False
    finally:
        # Always remove the local report, even when the upload failed
        # (the original only cleaned up on the success path).
        if markdown_local_path and os.path.exists(markdown_local_path):
            os.remove(markdown_local_path)
| |
|
| | |
# Partition the catalogue by provider (naming convention: gemini-* models
# need GEMINI_TOKEN, everything else goes through Groq) and expose only the
# models whose API key is actually configured.
model_names = list(models.keys())
groq_models = [name for name in model_names if "gemini" not in name]
gemini_models = [name for name in model_names if "gemini" in name]

available_models = []
if Groq_Token and Groq_Token.strip():
    available_models.extend(groq_models)
if gemini_token and gemini_token.strip():
    available_models.extend(gemini_models)

if not available_models:
    st.error("No API keys available! Please set up your API keys in the .env file")
    st.stop()

# Pick the preferred default among whatever is available.
default_index = 0
if "gpt-oss-120b" in available_models:
    default_index = available_models.index("gpt-oss-120b")
elif "deepseek-R1" in available_models:
    default_index = available_models.index("deepseek-R1")
| |
|
| | |
# Page header: lab logo plus app title; stacks vertically on small screens.
_HEADER_HTML = """
<style>
.header-container {
    display: flex;
    align-items: center;
    justify-content: center;
    gap: 12px;
    border-bottom: 1px solid #e5e7eb;
}

.header-container img {
    height: 80px;
}

.header-container h1 {
    padding: 0.25rem 0;
    margin: 0;
    font-size: 1.5rem;
    font-weight: 700;
    color: #2563eb;
}

/* 🔹 Responsive: On small screens stack vertically */
@media (max-width: 768px) {
    .header-container {
        flex-direction: column;
        text-align: center;
        gap: 0;
        padding: 0 0 0.40rem;
    }
    .header-container img {
        height: 60px;
    }
    .header-container h1 {
        padding: 0 0;
        font-size: 1.25rem;
    }
}
</style>
<div class="header-container">
    <img src="https://sustainability-lab.github.io/images/logo_light.svg" />
    <div style="display: flex; flex-direction: column; line-height: 1.2;">
        <h1>VayuChat</h1>
        <span>AI Air Quality Analysis • Sustainability Lab, IIT Gandhinagar</span>
    </div>
</div>
"""
st.markdown(_HEADER_HTML, unsafe_allow_html=True)
| |
|
| | |
@st.cache_data
def load_data():
    """Load and preprocess the PM2.5 dataset; cached across Streamlit reruns."""
    return preprocess_and_load_df(join(self_path, "Data.csv"))


# A data-load failure is fatal for the whole app, so stop with an error.
try:
    df = load_data()
except Exception as e:
    st.error(f"Error loading data: {e}")
    st.stop()

# NOTE(review): these two constants are not referenced in the visible flow
# below — presumably kept for legacy/compatibility reasons; confirm before removing.
inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
image_path = "IITGN_Logo.png"
| |
|
| | |
with st.sidebar:
    # Model picker lives at the very top so it is reachable without scrolling.
    model_name = st.selectbox(
        "🤖 AI Model:",
        available_models,
        index=default_index,
        help="Choose your AI model - easily accessible without scrolling!"
    )

    st.markdown("---")

    st.markdown("### 💭 Quick Queries")

    @st.cache_data
    def load_questions():
        """Read quick-query prompts from questions.txt (one non-empty line each).

        Returns an empty list when the file is missing or unreadable.
        """
        questions = []
        questions_file = join(self_path, "questions.txt")
        if os.path.exists(questions_file):
            try:
                with open(questions_file, 'r', encoding='utf-8') as f:
                    content = f.read()
                questions = [q.strip() for q in content.split("\n") if q.strip()]
            except Exception:
                questions = []
        return questions

    questions = load_questions()

    # Fallback prompts when questions.txt is absent or empty.
    if not questions:
        questions = [
            "Which month had highest pollution?",
            "Which city has worst air quality?",
            "Show annual PM2.5 average",
            "Plot monthly average PM2.5 for 2023",
            "List all cities by pollution level",
            "Compare winter vs summer pollution",
            "Show seasonal pollution patterns",
            "Which areas exceed WHO guidelines?",
            "What are peak pollution hours?",
            "Show PM10 vs PM2.5 comparison",
            "Which station records highest variability in PM2.5?",
            "Calculate pollution improvement rate year-over-year by city",
            "Identify cities with PM2.5 levels consistently above 50 μg/m³ for >6 months",
            "Find correlation between PM2.5 and PM10 across different seasons and cities",
            "Compare weekday vs weekend levels",
            "Plot yearly trend analysis",
            "Show pollution distribution by city",
            "Create correlation plot between pollutants"
        ]

    selected_prompt = None

    def _render_question_buttons(question_list, key_prefix):
        """Render one button per question; return the clicked question or None."""
        clicked = None
        for i, q in enumerate(question_list):
            if st.button(q, key=f"{key_prefix}_{i}", use_container_width=True, help=f"Analyze: {q}"):
                clicked = q
                st.session_state.last_selected_prompt = q
        return clicked

    if len(questions) > 0:
        st.markdown("**Select a question to analyze:**")

        # Single source of truth for category expanders: (title, widget key
        # prefix, keywords matched against the lower-cased question text).
        # Previously these keyword lists were duplicated in the "remaining"
        # computation below and could drift apart.
        category_specs = [
            ("📊 NCAP Funding & Policy Analysis", "ncap_q",
             ['ncap', 'funding', 'investment', 'rupee']),
            ("🌬️ Meteorology & Environmental Factors", "met_q",
             ['wind', 'temperature', 'humidity', 'rainfall', 'meteorological', 'monsoon', 'barometric']),
            ("👥 Population & Demographics", "pop_q",
             ['population', 'capita', 'density', 'exposure']),
            ("🏭 Multi-Pollutant Analysis", "multi_q",
             ['ozone', 'no2', 'correlation', 'multi-pollutant', 'interaction']),
        ]

        with st.expander("🚀 Getting Started - Simple Questions", expanded=True):
            clicked = _render_question_buttons(questions[:10], "start_q")
            if clicked:
                selected_prompt = clicked

        for title, key_prefix, keywords in category_specs:
            matching = [q for q in questions if any(word in q.lower() for word in keywords)]
            with st.expander(title, expanded=False):
                clicked = _render_question_buttons(matching, key_prefix)
                if clicked:
                    selected_prompt = clicked

        with st.expander("📈 Other Analysis Questions", expanded=False):
            # Everything not matched by any category keyword list.
            all_keyword_lists = [spec[2] for spec in category_specs]
            remaining_questions = [
                q for q in questions
                if not any(any(word in q.lower() for word in kws) for kws in all_keyword_lists)
            ]
            clicked = _render_question_buttons(remaining_questions, "other_q")
            if clicked:
                selected_prompt = clicked

        st.markdown("---")

    if st.button("Clear Chat", use_container_width=True):
        # Reset conversation state and start a fresh feedback session.
        st.session_state.responses = []
        st.session_state.processing = False
        st.session_state.session_id = str(uuid.uuid4())
        try:
            st.rerun()
        except AttributeError:
            # Older Streamlit versions only have experimental_rerun.
            st.experimental_rerun()
| |
|
| | |
# First-load chat state. session_id is normally set earlier in the script;
# this re-check keeps the block self-sufficient after a Clear Chat reset.
if "responses" not in st.session_state:
    st.session_state.responses = []
if "processing" not in st.session_state:
    st.session_state.processing = False
if "session_id" not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())
| |
|
| |
|
| |
|
| |
|
def show_custom_response(response):
    """Render one chat message (user or assistant) with custom HTML styling.

    Args:
        response: message dict with "role", "content" and, for assistant
            turns, optional "error", "timestamp" and "gen_code" keys.

    Returns:
        {"is_image": True} when an image file was located and displayed,
        otherwise {"is_image": False}. Callers pass this status to the
        feedback uploader.
    """
    role = response.get("role", "assistant")
    content = response.get("content", "")

    if role == "user":
        # Right-aligned bubble for the user's prompt.
        st.markdown(f"""
        <div style='display: flex; justify-content: flex-end; margin: 1rem 0;'>
            <div class='user-message'>
                {content}
            </div>
        </div>
        """, unsafe_allow_html=True)
    elif role == "assistant":
        # Heuristic: a string mentioning an image extension is a plot path.
        is_image_path = isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg', '.jpeg'])
        # pd is imported at module level; no need to re-import here.
        is_dataframe = isinstance(content, pd.DataFrame)

        error = response.get("error")
        timestamp = response.get("timestamp", "")
        timestamp_display = f" • {timestamp}" if timestamp else ""

        if error:
            st.markdown(f"""
            <div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
                <div class='assistant-message'>
                    <div class='assistant-info'>VayuChat{timestamp_display}</div>
                    <div class='error-message'>
                        ⚠️ <strong>Error:</strong> {error}
                        <br><br>
                        <em>💡 Try rephrasing your question or being more specific about what you'd like to analyze.</em>
                    </div>
                </div>
            </div>
            """, unsafe_allow_html=True)
        elif not is_image_path and not is_dataframe:
            # Plain text answer.
            st.markdown(f"""
            <div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
                <div class='assistant-message'>
                    <div class='assistant-info'>VayuChat{timestamp_display}</div>
                    {content if isinstance(content, str) else str(content)}
                </div>
            </div>
            """, unsafe_allow_html=True)
        elif is_dataframe:
            st.markdown(f"""
            <div style='display: flex; justify-content: flex-start; margin: 1rem 0;'>
                <div class='assistant-message'>
                    <div class='assistant-info'>VayuChat{timestamp_display}</div>
                    Here are the results:
                </div>
            </div>
            """, unsafe_allow_html=True)

            st.markdown("""
            <div class='context-info'>
                💡 This table is interactive - click column headers to sort, or scroll to view all data.
            </div>
            """, unsafe_allow_html=True)

            st.dataframe(
                content,
                use_container_width=True,
                hide_index=True,
                column_config=None
            )

        # Optional collapsible view of the code that produced the answer.
        if response.get("gen_code"):
            with st.expander("📋 View Generated Code", expanded=False):
                st.code(response["gen_code"], language="python")

        # Image rendering is best-effort: a missing or unreadable file must
        # never break the chat display.
        try:
            if isinstance(content, str) and (content.endswith('.png') or content.endswith('.jpg')):
                if os.path.exists(content):
                    st.markdown("""
                    <div style='margin: 1rem 0; display: flex; justify-content: center;'>
                    </div>
                    """, unsafe_allow_html=True)
                    st.image(content, width=1080, caption="Generated Visualization")
                    return {"is_image": True}

            elif isinstance(content, str) and any(ext in content for ext in ['.png', '.jpg']):
                # The path may be embedded inside a longer message; extract
                # the bare filename and try that.
                filename_match = re.search(r'([^/\\]+\.(?:png|jpg|jpeg))', content)
                if filename_match:
                    filename = filename_match.group(1)
                    if os.path.exists(filename):
                        st.markdown("""
                        <div style='margin: 1rem 0; display: flex; justify-content: center;'>
                        </div>
                        """, unsafe_allow_html=True)
                        st.image(filename, width=1080, caption="Generated Visualization")
                        return {"is_image": True}
        except Exception:
            # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            pass

    return {"is_image": False}
| |
|
| |
|
| | |
| | |
def _queue_retry(response_id):
    """Replay the user prompt that produced the assistant response at response_id.

    Removes both the assistant response and its preceding user prompt from
    history, stores the prompt as a follow-up, and reruns the app so the
    prompt is re-submitted. No-op when there is no preceding prompt.
    """
    if response_id > 0:
        retry_prompt = st.session_state.responses[response_id - 1].get("content", "")
        if retry_prompt:
            del st.session_state.responses[response_id]
            del st.session_state.responses[response_id - 1]
            st.session_state.follow_up_prompt = retry_prompt
            st.rerun()


# Render the conversation history with per-answer feedback controls.
for response_id, response in enumerate(st.session_state.responses):
    status = show_custom_response(response)

    if response["role"] == "assistant":
        # One feedback widget group per question/answer pair.
        feedback_key = f"feedback_{response_id // 2}"
        error = response.get("error", "")
        output = response.get("content", "")
        last_prompt = response.get("last_prompt", "")
        code = response.get("gen_code", "")

        # Spacer between the answer and its feedback row.
        st.markdown('<div style="margin: 1.5rem 0 0.5rem 0;"></div>', unsafe_allow_html=True)

        if "feedback" in st.session_state.responses[response_id]:
            # Feedback already given: show a thank-you banner plus retry.
            feedback_data = st.session_state.responses[response_id]["feedback"]
            col1, col2 = st.columns([3, 1])
            with col1:
                st.markdown(f"""
                <div style='
                    background: linear-gradient(135deg, #ecfdf5 0%, #d1fae5 100%);
                    border: 1px solid #a7f3d0;
                    border-radius: 8px;
                    padding: 0.75rem 1rem;
                    display: flex;
                    align-items: center;
                    gap: 8px;
                '>
                    <span style='font-size: 1.1rem;'>{feedback_data.get('score', '')}</span>
                    <span style='color: #059669; font-weight: 500; font-size: 0.9rem;'>
                        Thanks for your feedback!
                    </span>
                </div>
                """, unsafe_allow_html=True)
            with col2:
                if st.button("🔄 Retry", key=f"retry_{response_id}", use_container_width=True):
                    _queue_retry(response_id)
        else:
            # No feedback yet: rating buttons plus retry.
            col1, col2, col3, col4 = st.columns([2, 2, 1, 1])

            with col1:
                if st.button("✨ Excellent", key=f"{feedback_key}_excellent", use_container_width=True):
                    st.session_state.responses[response_id]["feedback"] = {"score": "✨ Excellent", "text": ""}
                    st.rerun()

            with col2:
                if st.button("🔧 Needs work", key=f"{feedback_key}_poor", use_container_width=True):
                    st.session_state.responses[response_id]["feedback"] = {"score": "🔧 Needs work", "text": ""}
                    st.rerun()

            with col4:
                if st.button("🔄 Retry", key=f"retry_{response_id}", use_container_width=True):
                    _queue_retry(response_id)
| |
|
| | |
# Free-text chat input pinned to the bottom of the page.
prompt = st.chat_input("💬 Ask about air quality trends, pollution analysis, or city comparisons...", key="main_chat")

# A sidebar quick-query click overrides anything typed this run.
if selected_prompt:
    prompt = selected_prompt

# A retry queued on the previous run is replayed here (once).
if st.session_state.get("follow_up_prompt") and not st.session_state.get("processing"):
    prompt = st.session_state.follow_up_prompt
    st.session_state.follow_up_prompt = None

if prompt and not st.session_state.get("processing"):
    # Ignore an exact repeat of the previous prompt with the same model.
    if "last_prompt" in st.session_state:
        last_prompt = st.session_state["last_prompt"]
        last_model_name = st.session_state.get("last_model_name", "")
        if (prompt == last_prompt) and (model_name == last_model_name):
            prompt = None

    if prompt:
        # Record the user turn, flag processing, and rerun so the spinner
        # block below picks the question up.
        user_response = get_from_user(prompt)
        st.session_state.responses.append(user_response)

        st.session_state.processing = True
        st.session_state.current_model = model_name
        st.session_state.current_question = prompt

        st.rerun()
| |
|
| | |
# Second half of the two-phase submit: the previous rerun recorded the user
# turn and set `processing`; this pass shows a spinner, asks the model, and
# appends the assistant turn.
if st.session_state.get("processing"):
    model_label = str(st.session_state.get('current_model', 'Unknown'))
    st.markdown("""
    <div style='padding: 1rem; text-align: center; background: #f8fafc; border-radius: 8px; margin: 1rem 0;'>
        <div style='display: flex; align-items: center; justify-content: center; gap: 0.5rem; color: #475569;'>
            <div style='font-weight: 500;'>🤖 Processing with """ + model_label + """</div>
            <div class='dots' style='display: inline-flex; gap: 2px;'>
                <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out;'></div>
                <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out; animation-delay: 0.16s;'></div>
                <div class='dot' style='width: 4px; height: 4px; background: #3b82f6; border-radius: 50%; animation: bounce 1.4s infinite ease-in-out; animation-delay: 0.32s;'></div>
            </div>
        </div>
        <div style='font-size: 0.75rem; color: #6b7280; margin-top: 0.25rem;'>Analyzing data and generating response...</div>
    </div>
    <style>
    @keyframes bounce {
        0%, 80%, 100% { transform: scale(0.8); opacity: 0.5; }
        40% { transform: scale(1.2); opacity: 1; }
    }
    </style>
    """, unsafe_allow_html=True)

    prompt = st.session_state.get("current_question")
    model_name = st.session_state.get("current_model")

    try:
        response = ask_question(model_name=model_name, question=prompt)

        # Guard against a backend returning something other than a dict.
        if not isinstance(response, dict):
            response = {
                "role": "assistant",
                "content": "Error: Invalid response format",
                "gen_code": "",
                "ex_code": "",
                "last_prompt": prompt,
                "error": "Invalid response format",
                "timestamp": datetime.now().strftime("%H:%M")
            }

        # Fill any keys the backend left out.
        for key, value in (
            ("role", "assistant"),
            ("content", "No content generated"),
            ("gen_code", ""),
            ("ex_code", ""),
            ("last_prompt", prompt),
            ("error", None),
        ):
            response.setdefault(key, value)
        response.setdefault("timestamp", datetime.now().strftime("%H:%M"))

    except Exception as e:
        # Any backend failure becomes a visible assistant error message.
        response = {
            "role": "assistant",
            "content": f"Sorry, I encountered an error: {str(e)}",
            "gen_code": "",
            "ex_code": "",
            "last_prompt": prompt,
            "error": str(e),
            "timestamp": datetime.now().strftime("%H:%M")
        }

    st.session_state.responses.append(response)
    st.session_state["last_prompt"] = prompt
    st.session_state["last_model_name"] = model_name
    st.session_state.processing = False

    # Clear the one-shot processing context.
    st.session_state.pop("current_model", None)
    st.session_state.pop("current_question", None)

    st.rerun()
| |
|
| | |
# Close the container opened by earlier markup.
st.markdown("</div>", unsafe_allow_html=True)

# While a response is being generated, nudge the page to the bottom.
# NOTE(review): scrollToBottom() is not defined in the visible source —
# presumably injected elsewhere; verify it exists.
if st.session_state.get("processing"):
    st.markdown("<script>scrollToBottom();</script>", unsafe_allow_html=True)

# Static dataset summary card.
st.markdown("### Dataset Info")
st.markdown("""
<div style='background: #f1f5f9; border-radius: 8px; padding: 1rem; margin-bottom: 1rem;'>
    <h4 style='margin: 0 0 0.5rem 0; color: #1e293b; font-size: 0.9rem;'>PM2.5 Air Quality Data</h4>
    <p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Time Range:</strong> 2022 - 2023</p>
    <p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Locations:</strong> 300+ cities across India</p>
    <p style='margin: 0; font-size: 0.75rem; color: #475569;'><strong>Records:</strong> 100,000+ measurements</p>
</div>
""", unsafe_allow_html=True)
| | |
| |
|
| | |
| | |
# Global stylesheet.
# Fix: `.st-emotion-cache-f4ro0r` used `align-items = center;` — invalid CSS
# ("=" instead of ":"), which browsers silently drop; corrected below.
# NOTE(review): the "#"-prefixed rule blocks are not valid CSS comments;
# browsers discard those rules, so they are effectively disabled — consider
# wrapping them in /* ... */ or deleting them.
st.markdown("""
<style>
/* Clean app background */
.stApp {
    background-color: #ffffff;
    color: #212529;
    font-family: 'Segoe UI', sans-serif;
}

/* Reduce main container padding */
.main .block-container {
    padding-top: 0px;
    padding-bottom: 3rem;
    max-width: 100%;
}

/* Remove excessive spacing */
.element-container {
    margin-bottom: 0.5rem !important;
}

/* Fix sidebar spacing */
[data-testid="stSidebar"] .element-container {
    margin-bottom: 0.25rem !important;
}

/* Sidebar */
[data-testid="stSidebar"] {
    background-color: #f8f9fa;
    border-right: 1px solid #dee2e6;
    padding: 1rem;
}

/* Optimize sidebar scrolling */
[data-testid="stSidebar"] > div:first-child {
    height: 100vh;
    overflow-y: auto;
    padding-bottom: 2rem;
}

[data-testid="stSidebar"]::-webkit-scrollbar {
    width: 6px;
}

[data-testid="stSidebar"]::-webkit-scrollbar-track {
    background: #f1f1f1;
    border-radius: 3px;
}

[data-testid="stSidebar"]::-webkit-scrollbar-thumb {
    background: #c1c1c1;
    border-radius: 3px;
}

[data-testid="stSidebar"]::-webkit-scrollbar-thumb:hover {
    background: #a1a1a1;
}

/* Main title */
.main-title {
    text-align: center;
    color: #343a40;
    font-size: 2.5rem;
    font-weight: 700;
    margin-bottom: 0.5rem;
}

/* Subtitle */
.subtitle {
    text-align: center;
    color: #6c757d;
    font-size: 1.1rem;
    margin-bottom: 1.5rem;
}

/* Instructions */
.instructions {
    background-color: #f1f3f5;
    border-left: 4px solid #0d6efd;
    padding: 1rem;
    margin-bottom: 1.5rem;
    border-radius: 6px;
    color: #495057;
    text-align: left;
}

/* Quick prompt buttons */
.quick-prompt-container {
    display: flex;
    flex-wrap: wrap;
    gap: 8px;
    margin-bottom: 1.5rem;
    padding: 1rem;
    background-color: #f8f9fa;
    border-radius: 10px;
    border: 1px solid #dee2e6;
}

.quick-prompt-btn {
    background-color: #0d6efd;
    color: white;
    border: none;
    padding: 8px 16px;
    border-radius: 20px;
    font-size: 0.9rem;
    cursor: pointer;
    transition: all 0.2s ease;
    white-space: nowrap;
}

.quick-prompt-btn:hover {
    background-color: #0b5ed7;
    transform: translateY(-2px);
}

/* User message styling */
.user-message {
    background: #3b82f6;
    color: white;
    padding: 0.75rem 1rem;
    border-radius: 7px;
    max-width: 95%;
}

.user-info {
    font-size: 0.875rem;
    opacity: 0.9;
    margin-bottom: 3px;
}

/* Assistant message styling */
.assistant-message {
    background: #f1f5f9;
    color: #334155;
    padding: 0.75rem 1rem;
    border-radius: 12px;
    max-width: 85%;
}

.assistant-info {
    font-size: 0.875rem;
    color: #6b7280;
    margin-bottom: 5px;
}

/* Processing indicator */
.processing-indicator {
    background: linear-gradient(135deg, #a8edea 0%, #fed6e3 100%);
    color: #333;
    padding: 1rem 1.5rem;
    border-radius: 12px;
    margin: 1rem 0;
    margin-left: 0;
    margin-right: auto;
    max-width: 70%;
    box-shadow: 0 2px 10px rgba(0,0,0,0.1);
    animation: pulse 2s infinite;
}

@keyframes pulse {
    0% { opacity: 1; }
    50% { opacity: 0.7; }
    100% { opacity: 1; }
}

/* Feedback box */
.feedback-section {
    background-color: #f8f9fa;
    border: 1px solid #dee2e6;
    padding: 1rem;
    border-radius: 8px;
    margin: 1rem 0;
}

/* Success and error messages */
.success-message {
    background-color: #d1e7dd;
    color: #0f5132;
    padding: 1rem;
    border-radius: 6px;
    border: 1px solid #badbcc;
}

.error-message {
    background-color: #f8d7da;
    color: #842029;
    padding: 1rem;
    border-radius: 6px;
    border: 1px solid #f5c2c7;
}

/* Chat input styling - Fixed alignment */
# .stChatInput {
#     border-radius: 12px !important;
#     border: 2px solid #e5e7eb !important;
#     background: #ffffff !important;
#     padding: 0.75rem 1rem !important;
#     font-size: 1rem !important;
#     width: 100% !important;
#     max-width: 70% !important;
#     margin: 0 !important;
#     box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1) !important;
#     transition: all 0.2s ease !important;
# }

# .stChatInput:focus {
#     border-color: #3b82f6 !important;
#     box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.1) !important;
#     outline: none !important;
# }

/* Chat input container */
.stChatInput > div {
    padding: 0 !important;
    margin: 0 !important;
}

/* Chat input text area */
# .stChatInput textarea {
#     border: none !important;
#     background: transparent !important;
#     padding: 0 !important;
#     margin: 0 !important;
#     font-size: 1rem !important;
#     line-height: 1.5 !important;
#     resize: none !important;
#     outline: none !important;
# }

/* Chat input placeholder */
# .stChatInput textarea::placeholder {
#     color: #9ca3af !important;
#     font-style: normal !important;
# }

.st-emotion-cache-f4ro0r {
    align-items: center;
}

/* Fix the main chat input container alignment */
[data-testid="stChatInput"] {
    position: fixed !important;
    bottom: 0.5rem !important;
    left: 6rem !important;
    right: 0 !important;
    background: #ffffff !important;
    width: 65% !important;
    box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.1) !important;
}

/* Adjust main content to account for fixed chat input */
.main .block-container {
    padding-bottom: 100px !important;
}

/* Chat input button styling */
[data-testid="stChatInput"] button {
    background: #3b82f6 !important;
    color: white !important;
    border: none !important;
    border-radius: 12px !important;
    font-weight: 600 !important;
    transition: background-color 0.2s ease !important;
}

[data-testid="stChatInput"] button:hover {
    background: #2563eb !important;
}

/* Textarea inside chat input */
[data-testid="stChatInput"] [data-baseweb="textarea"] {
    border: 2px solid #3b82f6 !important;
    border-radius: 12px !important;
    font-size: 16px !important;
    color: #111 !important;

    width: 100% !important; /* fill the parent container */
    box-sizing: border-box !important;
}

/* Ensure proper spacing from sidebar */
@media (min-width: 768px) {
    [data-testid="stChatInput"] {
        margin-left: 21rem !important; /* Account for sidebar width */
    }
}

/* Code container styling */
.code-container {
    margin: 1rem 0;
    border: 1px solid #d1d5db;
    border-radius: 12px;
    background: white;
    box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}

.code-header {
    display: flex;
    justify-content: space-between;
    align-items: center;
    padding: 0.875rem 1.25rem;
    background: linear-gradient(135deg, #f8fafc 0%, #f1f5f9 100%);
    border-bottom: 1px solid #e2e8f0;
    cursor: pointer;
    transition: all 0.2s ease;
    border-radius: 12px 12px 0 0;
}

.code-header:hover {
    background: linear-gradient(135deg, #e2e8f0 0%, #cbd5e1 100%);
}

.code-title {
    font-size: 0.9rem;
    font-weight: 600;
    color: #1e293b;
    display: flex;
    align-items: center;
    gap: 0.5rem;
}

.code-title:before {
    content: "⚡";
    font-size: 0.8rem;
}

.toggle-text {
    font-size: 0.75rem;
    color: #64748b;
    font-weight: 500;
}

.code-block {
    background: linear-gradient(135deg, #0f172a 0%, #1e293b 100%);
    color: #e2e8f0;
    padding: 1.5rem;
    font-family: 'SF Mono', 'Monaco', 'Menlo', 'Consolas', monospace;
    font-size: 0.875rem;
    overflow-x: auto;
    line-height: 1.6;
    border-radius: 0 0 12px 12px;
}

.answer-container {
    background: #f8fafc;
    border: 1px solid #e2e8f0;
    border-radius: 8px;
    padding: 1.5rem;
    margin: 1rem 0;
}

.answer-text {
    font-size: 1.125rem;
    color: #1e293b;
    line-height: 1.6;
    margin-bottom: 1rem;
}

.answer-highlight {
    background: #fef3c7;
    padding: 0.125rem 0.375rem;
    border-radius: 4px;
    font-weight: 600;
    color: #92400e;
}

.context-info {
    background: #f1f5f9;
    border-left: 4px solid #3b82f6;
    padding: 0.75rem 1rem;
    margin: 1rem 0;
    font-size: 0.875rem;
    color: #475569;
}

/* Hide default menu and footer */
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}

/* Auto scroll */
.main-container {
    height: 70vh;
    overflow-y: auto;
}
</style>
""", unsafe_allow_html=True)