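"""Streamlit chat UI for Agentic Browser, an AI-powered assistant that runs
open-source language models locally.

Launch with `streamlit run <path to this file>` (the exact filename and
location depend on the repo layout; the path setup below assumes the file
lives two directories below the project root).
"""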
import streamlit as st
import time
import os
import sys

# Add the project root to the Python path so the absolute import below works
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

# Import model manager using absolute import
from src.models.model_manager import model_manager
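# model_manager is this project's model registry; judging from the calls
# below, it exposes load_model(name) and
# generate_text(model_name=..., prompt=..., temperature=..., max_length=...).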

# Set page config
st.set_page_config(
    page_title="🤖 Agentic Browser",
    page_icon="🤖",
    layout="wide"
)

# Custom CSS for better styling
st.markdown("""
<style>
    .stTextInput > div > div > input {
        padding: 12px;
        border-radius: 8px;
        border: 1px solid #e0e0e0;
    }
    .stButton > button {
        width: 100%;
        border-radius: 8px;
        padding: 8px 16px;
        font-weight: 500;
    }
    .stMarkdown h1 {
        color: #1f2937;
        margin-bottom: 0.5em;
    }
    .stMarkdown h3 {
        color: #374151;
        margin-top: 1.5em;
    }
</style>
""", unsafe_allow_html=True)

# Initialize session state
if 'messages' not in st.session_state:
    st.session_state.messages = []
    st.session_state.model_loaded = False
    st.session_state.current_model = None

def load_model(model_name):
    """Load the selected model, showing a spinner while it initializes."""
    try:
        with st.spinner(f"Loading {model_name}..."):
            model_manager.load_model(model_name)
        st.session_state.model_loaded = True
        st.session_state.current_model = model_name
        st.success(f"Successfully loaded {model_name} model!")
        return True
    except Exception as e:
        st.error(f"Error loading model: {str(e)}")
        return False
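
# generate_response lazily (re)loads a model whenever the sidebar selection
# changes, so switching models mid-conversation works without a manual reload.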
def generate_response(prompt, model_name, temperature=0.7):
    """Generate a response using the selected model."""
    try:
        if not st.session_state.model_loaded or st.session_state.current_model != model_name:
            if not load_model(model_name):
                return "Error: Failed to load model."
        # Generate response
        response = model_manager.generate_text(
            model_name=model_name,
            prompt=prompt,
            temperature=temperature,
            max_length=1024
        )
        return response
    except Exception as e:
        return f"Error generating response: {str(e)}"

# Sidebar for settings
with st.sidebar:
    st.title("⚙️ Settings")

    # Model selection
    selected_model = st.selectbox(
        "Select Model",
        ["tiny-llama", "mistral-7b"],
        index=0,
        help="Select the model to use for text generation"
    )

    # Temperature slider
    temperature = st.slider(
        "Temperature",
        min_value=0.1,
        max_value=1.0,
        value=0.7,
        step=0.1,
        help="Controls randomness in response generation: lower values are more deterministic, higher values more creative"
    )

    # Load model button
    if st.button("🚀 Load Model"):
        load_model(selected_model)

    st.markdown("---")
    st.markdown("### About")
    st.markdown("""
    **Agentic Browser** is an AI-powered web assistant that runs locally on your machine.
    It uses open-source language models to provide helpful and contextual responses.
    """)

    st.markdown("---")
    st.markdown("### Models")
    st.markdown("""
    - **TinyLlama**: fast but less powerful (1.1B parameters)
    - **Mistral-7B**: more powerful but requires more memory (7B parameters)
    """)

# Main chat interface
st.title("🤖 Agentic Browser")
st.caption("Powered by local AI models")
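
# Streamlit reruns this entire script on every interaction, so the chat
# history must be replayed from session state each time.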
# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Chat input
if prompt := st.chat_input("Type your message here..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate and display assistant response
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        # Generate the full response, then replay it word by word
        # for a simulated streaming effect
        response = generate_response(prompt, selected_model, temperature)
        for chunk in response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": full_response})

    # Rerun so the full conversation is redrawn from session state
    # (st.rerun replaces the deprecated st.experimental_rerun)
    st.rerun()