import streamlit as st
import time
import os
import sys

# Add the project root to the Python path
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

# Import model manager using absolute import
from src.models.model_manager import model_manager
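# NOTE: model_manager is assumed to be a module-level singleton exposing
# load_model(model_name) and generate_text(model_name=..., prompt=...,
# temperature=..., max_length=...), matching the calls below; see the
# reference sketch at the end of this file.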
# Set page config
st.set_page_config(
    page_title="🤖 Agentic Browser",
    page_icon="🤖",
    layout="wide"
)

# Custom CSS for better styling
st.markdown("""
<style>
    .stTextInput > div > div > input {
        padding: 12px;
        border-radius: 8px;
        border: 1px solid #e0e0e0;
    }
    .stButton > button {
        width: 100%;
        border-radius: 8px;
        padding: 8px 16px;
        font-weight: 500;
    }
    .stMarkdown h1 {
        color: #1f2937;
        margin-bottom: 0.5em;
    }
    .stMarkdown h3 {
        color: #374151;
        margin-top: 1.5em;
    }
</style>
""", unsafe_allow_html=True)
# Initialize session state
if 'messages' not in st.session_state:
    st.session_state.messages = []
    st.session_state.model_loaded = False
    st.session_state.current_model = None
def load_model(model_name):
    """Load the selected model."""
    try:
        with st.spinner(f"Loading {model_name}..."):
            model_manager.load_model(model_name)
            st.session_state.model_loaded = True
            st.session_state.current_model = model_name
        st.success(f"Successfully loaded {model_name} model!")
        return True
    except Exception as e:
        st.error(f"Error loading model: {str(e)}")
        return False
def generate_response(prompt, model_name, temperature=0.7):
    """Generate a response using the selected model."""
    try:
        # Load the model on demand if it isn't already the active one
        if not st.session_state.model_loaded or st.session_state.current_model != model_name:
            if not load_model(model_name):
                return "Error: Failed to load model."
        # Generate response
        response = model_manager.generate_text(
            model_name=model_name,
            prompt=prompt,
            temperature=temperature,
            max_length=1024
        )
        return response
    except Exception as e:
        return f"Error generating response: {str(e)}"
# Sidebar for settings
with st.sidebar:
    st.title("⚙️ Settings")

    # Model selection
    selected_model = st.selectbox(
        "Select Model",
        ["tiny-llama", "mistral-7b"],
        index=0,
        help="Select the model to use for text generation"
    )

    # Temperature slider
    temperature = st.slider(
        "Temperature",
        min_value=0.1,
        max_value=1.0,
        value=0.7,
        step=0.1,
        help="Controls randomness in the response generation. Lower = more deterministic, Higher = more creative"
    )

    # Load model button
    if st.button("🚀 Load Model"):
        load_model(selected_model)

    st.markdown("---")
    st.markdown("### About")
    st.markdown("""
    **Agentic Browser** is an AI-powered web assistant that runs locally on your machine.
    It uses open-source language models to provide helpful and contextual responses.
    """)

    st.markdown("---")
    st.markdown("### Models")
    st.markdown("""
    - **TinyLlama**: Fast but less powerful (1.1B parameters)
    - **Mistral-7B**: More powerful but requires more memory (7B parameters)
    """)
# Main chat interface
st.title("🤖 Agentic Browser")
st.caption("Powered by local AI models")

# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Chat input
if prompt := st.chat_input("Type your message here..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate and display assistant response
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        # Generate the full response, then simulate streaming by revealing
        # it word by word behind a cursor marker
        response = generate_response(prompt, selected_model, temperature)
        for chunk in response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})
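
# ---------------------------------------------------------------------------
# Reference only: a minimal sketch of the ModelManager interface this app
# assumes, with one possible backing implementation built on Hugging Face
# transformers pipelines. Names, signatures, and the model-ID mapping are
# inferred from the calls above; the real src/models/model_manager.py may
# differ.
#
# from transformers import pipeline
#
# class ModelManager:
#     # Assumed mapping from the UI's short names to Hub model IDs
#     MODEL_IDS = {
#         "tiny-llama": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
#         "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2",
#     }
#
#     def __init__(self):
#         self._pipelines = {}  # cache of loaded pipelines, keyed by short name
#
#     def load_model(self, model_name):
#         if model_name not in self._pipelines:
#             self._pipelines[model_name] = pipeline(
#                 "text-generation", model=self.MODEL_IDS[model_name]
#             )
#
#     def generate_text(self, model_name, prompt, temperature=0.7, max_length=1024):
#         self.load_model(model_name)
#         outputs = self._pipelines[model_name](
#             prompt,
#             do_sample=True,
#             temperature=temperature,
#             max_length=max_length,
#             return_full_text=False,
#         )
#         return outputs[0]["generated_text"]
#
# model_manager = ModelManager()
# ---------------------------------------------------------------------------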