"""
đ NAVADA 2.0 - Advanced AI Computer Vision Application (Lite Version)
Streamlit Version for Hugging Face Spaces Deployment
Enhanced Edition by Lee Akpareva | AI Consultant & Computer Vision Specialist
"""
import os
import time
from collections import Counter
from datetime import datetime

import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
from PIL import Image
# Configure Streamlit page (MUST be the first Streamlit call in the script!)
st.set_page_config(
    page_title="đ NAVADA 2.0 - AI Computer Vision",
    page_icon="đ",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Backend imports - Lite version (no face recognition).
# Wrapped in try/except so a missing dependency shows a friendly error in the
# UI instead of a raw traceback; st.stop() halts the script render entirely.
try:
    from backend.yolo import detect_objects
    from backend.openai_client import explain_detection
except ImportError as e:
    st.error(f"â ī¸ Import error: {e}")
    st.error("đĻ Please install dependencies: pip install -r requirements.txt")
    st.stop()

# Custom CSS for enhanced styling.
# NOTE(review): the style body is empty here — presumably the CSS payload was
# lost in a previous edit; confirm against version control.
st.markdown("""
""", unsafe_allow_html=True)
def create_detection_chart(detected_objects):
    """Build an interactive Plotly bar chart of detection statistics.

    Args:
        detected_objects: Iterable of detected object labels (strings).
            Duplicate labels represent multiple instances of the same class.

    Returns:
        plotly.graph_objects.Figure: A bar chart of counts per object type,
        or an annotated placeholder figure when nothing was detected.

    Note:
        main() currently renders a hard-coded sample chart rather than
        calling this helper — presumably it is wired up by a caller outside
        this file, or intended for future use; verify.
    """
    # Tally occurrences of each object class.
    object_counts = Counter(detected_objects)

    if not object_counts:
        # Nothing detected: return a transparent placeholder with a
        # centered "No objects detected" note instead of an empty axis.
        fig = go.Figure()
        fig.add_annotation(
            text="No objects detected",
            xref="paper", yref="paper",
            x=0.5, y=0.5, showarrow=False,
            font=dict(size=20, color="gray")
        )
        fig.update_layout(
            height=300,
            showlegend=False,
            paper_bgcolor='rgba(0,0,0,0)',
            plot_bgcolor='rgba(0,0,0,0)'
        )
        return fig

    # Bar chart: one bar per object class, labelled with its count.
    objects = list(object_counts.keys())
    counts = list(object_counts.values())
    fig = go.Figure(data=[
        go.Bar(
            x=objects,
            y=counts,
            marker_color='rgba(50, 171, 96, 0.6)',
            marker_line_color='rgba(50, 171, 96, 1.0)',
            marker_line_width=2,
            text=counts,
            textposition='auto'
        )
    ])
    # Transparent backgrounds so the chart blends with the page theme.
    fig.update_layout(
        title="Detected Objects",
        xaxis_title="Object Type",
        yaxis_title="Count",
        height=400,
        showlegend=False,
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)'
    )
    return fig
def main():
    """Render the NAVADA 2.0 Streamlit UI.

    Layout: a page header, a sidebar with detection settings and app info,
    a two-column body (image upload + detection results on the left, sample
    statistics and feature highlights on the right), and a footer.
    """
    # Main header (string content kept at column 0 — indenting markdown
    # inside the literal would change how it renders).
    st.markdown("""
đ NAVADA 2.0 - Advanced AI Computer Vision
Lite Version - Object Detection & AI Analysis
Built with YOLOv8 âĸ OpenAI âĸ Streamlit
""", unsafe_allow_html=True)

    # Sidebar: settings and static info.
    with st.sidebar:
        st.markdown("### đ¯ Detection Settings")
        # Detection confidence threshold.
        # NOTE(review): this value is never passed to detect_objects(), so
        # the slider currently has no effect — confirm whether the backend
        # should accept a confidence parameter.
        confidence = st.slider(
            "Detection Confidence",
            min_value=0.1,
            max_value=1.0,
            value=0.5,
            step=0.05,
            help="Minimum confidence for object detection"
        )

        st.markdown("### đ Features")
        st.markdown("""
- đ¯ **Object Detection**: YOLOv8 powered
- đ¤ **AI Explanations**: OpenAI integration
- đ **Interactive Charts**: Real-time analytics
- đ¨ **Visual Results**: Annotated images
""")

        st.markdown("### âšī¸ About")
        st.markdown("""
This is the **Lite Version** optimized for Hugging Face Spaces.
**Created by:** Lee Akpareva
**AI Consultant & Computer Vision Specialist**
""")

    # Main content: wide upload/results column + narrow stats column.
    col1, col2 = st.columns([2, 1])

    with col1:
        st.markdown("### đ¸ Upload Image for Analysis")
        uploaded_file = st.file_uploader(
            "Choose an image...",
            type=['png', 'jpg', 'jpeg'],
            help="Upload an image to detect objects and get AI analysis"
        )

        if uploaded_file is not None:
            # Show the raw upload before any analysis.
            # NOTE(review): use_column_width is deprecated in recent
            # Streamlit releases in favor of use_container_width — left
            # unchanged to avoid assuming the deployed version.
            image = Image.open(uploaded_file)
            st.image(image, caption="Uploaded Image", use_column_width=True)

            # Analysis button: run YOLO detection on demand.
            if st.button("đ Analyze Image", type="primary"):
                with st.spinner("đ Detecting objects..."):
                    annotated_img, detected_objects = detect_objects(image)

                if detected_objects:
                    # Reconstructed from a line-broken literal in the
                    # original source (the message was split mid-string).
                    st.success(f"â Detected {len(detected_objects)} objects!")

                    # Show annotated image.
                    st.markdown("### đ¯ Detection Results")
                    st.image(annotated_img, caption="Detected Objects", use_column_width=True)

                    # Show per-class detection details.
                    st.markdown("### đ Detected Objects")
                    object_counts = Counter(detected_objects)
                    for i, (obj_name, count) in enumerate(object_counts.items()):
                        col_a, col_b, col_c = st.columns(3)
                        with col_a:
                            st.metric("Object", obj_name)
                        with col_b:
                            st.metric("Count", count)
                        with col_c:
                            st.metric("ID", f"#{i+1}")

                    # AI explanation — only when an OpenAI key is configured.
                    if os.getenv("OPENAI_API_KEY"):
                        st.markdown("### đ¤ AI Analysis")
                        with st.spinner("đ§ Generating AI explanation..."):
                            try:
                                explanation = explain_detection(detected_objects)
                                st.markdown(f"**AI Insight:** {explanation}")
                            except Exception as e:
                                # Best-effort: AI analysis failing should not
                                # break the detection results already shown.
                                st.warning(f"AI analysis unavailable: {str(e)}")
                    else:
                        st.warning("đ Add OPENAI_API_KEY in settings for AI explanations")
                else:
                    st.warning("â No objects detected in this image.")

    with col2:
        st.markdown("### đ Detection Statistics")
        # Static sample chart (placeholder; not driven by real detections).
        sample_data = {
            'Object': ['Person', 'Car', 'Dog', 'Cat'],
            'Count': [3, 2, 1, 1]
        }
        fig = px.bar(
            sample_data,
            x='Object',
            y='Count',
            title="Sample Detection Results",
            color='Count',
            color_continuous_scale='Viridis'
        )
        fig.update_layout(height=300)
        st.plotly_chart(fig, use_container_width=True)

        # Feature highlights.
        # NOTE(review): the card template below is an empty f-string — the
        # HTML that used icon/title/desc appears to have been lost; confirm
        # against version control.
        st.markdown("### ⨠Key Features")
        features = [
            ("đ¯", "Object Detection", "Advanced YOLOv8 model"),
            ("đ¤", "AI Analysis", "OpenAI explanations"),
            ("đ", "Real-time Charts", "Interactive visualizations"),
            ("đ", "Fast Processing", "Optimized for speed")
        ]
        for icon, title, desc in features:
            st.markdown(f"""
""", unsafe_allow_html=True)

    # Footer
    st.markdown("---")
    st.markdown("""
đ Experience Advanced Computer Vision
â Built by Lee Akpareva | AI Consultant & Computer Vision Specialist â
đ Powered by YOLOv8 âĸ OpenAI âĸ Streamlit
""", unsafe_allow_html=True)
# Script entry point: only run the app when executed directly.
if __name__ == "__main__":
    main()