AlBaraa63 committed on
Commit 5d1e858 · verified · 1 Parent(s): 24ca93f

Upload 17 files

.env.example CHANGED
@@ -1,80 +1,37 @@
1
- # ============================================================================
2
- # CleanCity Agent - Environment Configuration
3
- # ============================================================================
4
- # Copy this file to .env and configure your settings
5
-
6
- # ============================================================================
7
- # LLM PROVIDER SELECTION
8
- # ============================================================================
9
- # Choose: "anthropic" | "openai" | "gemini" | "offline"
10
- # offline = No API calls, uses mock responses (good for testing)
11
- LLM_PROVIDER=offline
12
-
13
- # ============================================================================
14
- # LLM API KEYS
15
- # ============================================================================
16
-
17
- # Anthropic Claude (Recommended - Latest: Claude 3.5 Sonnet)
18
- # Get your key: https://console.anthropic.com/
19
- # ANTHROPIC_API_KEY=sk-ant-api03-...
20
-
21
- # OpenAI (GPT-4o, GPT-4 Turbo)
22
- # Get your key: https://platform.openai.com/api-keys
23
- # OPENAI_API_KEY=sk-proj-...
24
-
25
- # Google Gemini (Gemini Pro, Gemini Ultra)
26
- # Get your key: https://makersuite.google.com/app/apikey
27
- # GEMINI_API_KEY=AIza...
28
-
29
- # ============================================================================
30
- # MODEL CONFIGURATION (Optional - defaults are set)
31
- # ============================================================================
32
-
33
- # Anthropic model selection
34
- # ANTHROPIC_MODEL=claude-3-5-sonnet-20241022
35
-
36
- # OpenAI model selection
37
- # OPENAI_MODEL=gpt-4o
38
-
39
- # Google Gemini model selection
40
- # GEMINI_MODEL=gemini-1.5-pro
41
-
42
- # ============================================================================
43
- # YOLO MODEL CONFIGURATION
44
- # ============================================================================
45
-
46
- # Path to YOLO weights file
47
- YOLO_MODEL_PATH=Weights/best.pt
48
-
49
- # Detection confidence threshold (0.0 to 1.0)
50
- YOLO_CONFIDENCE=0.25
51
-
52
- # ============================================================================
53
- # DATABASE CONFIGURATION
54
- # ============================================================================
55
-
56
- # SQLite database path
57
- DB_PATH=data/trash_events.db
58
-
59
- # ============================================================================
60
- # GRADIO SERVER CONFIGURATION
61
- # ============================================================================
62
-
63
- # Server host (0.0.0.0 = accessible from network, 127.0.0.1 = localhost only)
64
- GRADIO_SERVER_NAME=127.0.0.1
65
-
66
- # Server port
67
- GRADIO_SERVER_PORT=7860
68
-
69
- # Share publicly via Gradio link (true/false)
70
- GRADIO_SHARE=false
71
-
72
- # ============================================================================
73
- # LOGGING & DEBUG
74
- # ============================================================================
75
-
76
- # Log level: DEBUG, INFO, WARNING, ERROR
77
- LOG_LEVEL=INFO
78
-
79
- # Enable detailed YOLO output
80
- YOLO_VERBOSE=false
 
1
+ # Example environment configuration for CleanCity Agent
2
+ # Copy this file to .env and fill in your API keys
3
+
4
+ # ============================================================================
5
+ # LLM PROVIDER CONFIGURATION
6
+ # ============================================================================
7
+ # Choose your LLM provider: "anthropic" | "openai" | "gemini" | "offline"
8
+ # Default: "offline" (uses mock responses, no API key needed)
9
+ LLM_PROVIDER=offline
10
+
11
+ # ============================================================================
12
+ # API KEYS (only needed if not using offline mode)
13
+ # ============================================================================
14
+
15
+ # Anthropic Claude (recommended)
16
+ # Get your key at: https://console.anthropic.com/
17
+ # ANTHROPIC_API_KEY=sk-ant-...
18
+
19
+ # OpenAI GPT
20
+ # Get your key at: https://platform.openai.com/api-keys
21
+ # OPENAI_API_KEY=sk-...
22
+
23
+ # Google Gemini
24
+ # Get your key at: https://makersuite.google.com/app/apikey
25
+ # GEMINI_API_KEY=...
26
+
27
+ # ============================================================================
28
+ # OPTIONAL CONFIGURATION
29
+ # ============================================================================
30
+
31
+ # Database path (default: data/trash_events.db)
32
+ # DB_PATH=data/trash_events.db
33
+
34
+ # Gradio server configuration
35
+ # GRADIO_SERVER_NAME=0.0.0.0
36
+ # GRADIO_SERVER_PORT=7860
37
+ # GRADIO_SHARE=false
 
.gitattributes CHANGED
@@ -1,35 +1,35 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore CHANGED
@@ -1,48 +1,48 @@
1
- # Python
2
- __pycache__/
3
- *.py[cod]
4
- *$py.class
5
- *.so
6
- .Python
7
- build/
8
- develop-eggs/
9
- dist/
10
- downloads/
11
- eggs/
12
- .eggs/
13
- lib/
14
- lib64/
15
- parts/
16
- sdist/
17
- var/
18
- wheels/
19
- *.egg-info/
20
- .installed.cfg
21
- *.egg
22
-
23
- # Virtual Environment
24
- .venv/
25
- venv/
26
- ENV/
27
- env/
28
-
29
- # Environment Variables
30
- .env
31
-
32
- # Database
33
- data/*.db
34
- data/*.db-journal
35
-
36
- # IDE
37
- .vscode/
38
- .idea/
39
- *.swp
40
- *.swo
41
- *~
42
-
43
- # Gradio
44
- flagged/
45
-
46
- # OS
47
- .DS_Store
48
- Thumbs.db
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+
23
+ # Virtual Environment
24
+ .venv/
25
+ venv/
26
+ ENV/
27
+ env/
28
+
29
+ # Environment Variables
30
+ .env
31
+
32
+ # Database
33
+ data/*.db
34
+ data/*.db-journal
35
+
36
+ # IDE
37
+ .vscode/
38
+ .idea/
39
+ *.swp
40
+ *.swo
41
+ *~
42
+
43
+ # Gradio
44
+ flagged/
45
+
46
+ # OS
47
+ .DS_Store
48
+ Thumbs.db
README.md CHANGED
@@ -1,178 +1,395 @@
1
- ---
2
- title: CleanCity Agent - AI Trash Detection
3
- emoji: 🌍
4
- colorFrom: green
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: "5.9.1"
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- # 🌍 CleanCity Agent
13
-
14
- **AI-Powered Trash Detection & Cleanup Planner**
15
-
16
- CleanCity Agent is a modern web application that helps communities identify, track, and clean up littered areas using computer vision and AI-powered planning.
17
-
18
- ## ✨ Features
19
-
20
- ### 🔍 **Smart Trash Detection**
21
- - YOLOv8/v11 computer vision model
22
- - Real-time object detection with bounding boxes
23
- - Identifies multiple trash categories
24
- - Confidence scores for each detection
25
-
26
- ### 📋 **Intelligent Cleanup Planning**
27
- - Automatic severity assessment (Low/Medium/High)
28
- - Resource estimation (volunteers, time, equipment)
29
- - Environmental impact analysis
30
- - Actionable cleanup recommendations
31
-
32
- ### 💬 **AI Chat Assistant**
33
- - Ask questions about cleanup strategies
34
- - Get environmental impact assessments
35
- - Community organizing advice
36
- - Powered by latest LLMs (offline mode by default)
37
-
38
- ## 🚀 Quick Start
39
-
40
- ### Running Locally
41
-
42
- 1. **Clone the repository**
43
- ```bash
44
- git clone <your-repo-url>
45
- cd CleanCity
46
- ```
47
-
48
- 2. **Install dependencies**
49
- ```bash
50
- pip install -r requirements.txt
51
- ```
52
-
53
- 3. **Run the application**
54
- ```bash
55
- python app.py
56
- ```
57
-
58
- 4. **Open your browser**
59
- - Navigate to `http://127.0.0.1:7860`
60
-
61
- ## ⚙️ Configuration
62
-
63
- ### Environment Variables
64
-
65
- Copy `.env.example` to `.env` and configure:
66
-
67
- ```bash
68
- # LLM Provider (optional - defaults to offline)
69
- LLM_PROVIDER=offline # or: anthropic, openai, gemini
70
-
71
- # API Keys (only if using online LLM)
72
- # ANTHROPIC_API_KEY=sk-ant-...
73
- # OPENAI_API_KEY=sk-proj-...
74
- # GEMINI_API_KEY=AIza...
75
-
76
- # YOLO Model
77
- YOLO_MODEL_PATH=Weights/best.pt
78
- YOLO_CONFIDENCE=0.25
79
-
80
- # Server Settings
81
- GRADIO_SERVER_NAME=127.0.0.1
82
- GRADIO_SERVER_PORT=7860
83
- ```
84
-
85
- ## 🧠 Technology Stack
86
-
87
- - **Frontend:** Gradio 5.9.1
88
- - **Computer Vision:** Ultralytics YOLO (8.3.41+)
89
- - **Deep Learning:** PyTorch 2.5.1+
90
- - **LLM Providers:**
91
- - Anthropic Claude 3.5 Sonnet
92
- - OpenAI GPT-4o
93
- - Google Gemini 1.5 Pro
94
- - **Database:** SQLite (built-in)
95
-
96
- ## 📦 Project Structure
97
-
98
- ```
99
- CleanCity/
100
- ├── app.py # Gradio web interface
101
- ├── trash_model.py # YOLO detection module
102
- ├── llm_client.py # Multi-provider LLM client
103
- ├── requirements.txt # Python dependencies
104
- ├── .env.example # Environment template
105
- ├── Weights/
106
- │ └── best.pt # YOLO model weights
107
- ├── agents/ # AI agents (optional)
108
- ├── tools/ # Tools module (optional)
109
- └── data/ # Database storage
110
- ```
111
-
112
- ## 🎯 Usage
113
-
114
- ### 1. Analyze Image
115
- - Upload or capture an image
116
- - Click "Analyze Image"
117
- - View detected trash with bounding boxes
118
- - Review cleanup plan and recommendations
119
-
120
- ### 2. AI Assistant
121
- - Ask questions about cleanup planning
122
- - Get advice on environmental impact
123
- - Learn about community organizing
124
-
125
- ## 🔧 Development
126
-
127
- ### Requirements
128
- - Python 3.11+
129
- - YOLO model weights (`Weights/best.pt`)
130
- - Optional: API keys for LLM providers
131
-
132
- ### Running Tests
133
- ```bash
134
- # Test trash detection module
135
- python trash_model.py
136
-
137
- # Test LLM client
138
- python llm_client.py
139
- ```
140
-
141
- ## 🌐 Deployment
142
-
143
- ### Hugging Face Spaces
144
- This app is configured to run on Hugging Face Spaces with Gradio SDK.
145
-
146
- ### Local Server
147
- ```bash
148
- python app.py
149
- ```
150
-
151
- ## 🔒 Privacy
152
-
153
- - All image processing happens **locally** on your device
154
- - No images are uploaded to external servers
155
- - LLM API calls only if you configure API keys
156
- - Your privacy is our priority
157
-
158
- ## 📄 License
159
-
160
- MIT License - feel free to use and modify for your projects!
161
-
162
- ## 🤝 Contributing
163
-
164
- Contributions are welcome! Feel free to:
165
- - Report bugs
166
- - Suggest features
167
- - Submit pull requests
168
-
169
- ## 🌟 Acknowledgments
170
-
171
- Built with:
172
- - [Gradio](https://gradio.app/) - Web UI framework
173
- - [Ultralytics YOLO](https://github.com/ultralytics/ultralytics) - Object detection
174
- - [Anthropic](https://anthropic.com/), [OpenAI](https://openai.com/), [Google](https://ai.google.dev/) - LLM providers
175
-
176
- ---
177
-
178
- **Made with ❤️ for a cleaner planet 🌍**
 
1
+ ---
2
+ title: CleanCity Agent - AI Trash Detection & Cleanup Planner
3
+ emoji: 🌍
4
+ colorFrom: green
5
+ colorTo: blue
6
+ sdk: gradio
7
+ sdk_version: "5.9.1"
8
+ app_file: app.py
9
+ pinned: false
10
+ tags:
11
+ - mcp-in-action-track-consumer
12
+ - mcp
13
+ - anthropic
14
+ - computer-vision
15
+ - environmental
16
+ - gradio-hackathon
17
+ - ai-agents
18
+ - mcp-server
19
+ ---
20
+
21
+ # 🌍 CleanCity Agent
22
+
23
+ **Autonomous Trash Detection & Cleanup Planner**
24
+
25
+ > 🏆 **MCP's 1st Birthday Hackathon Submission**
26
+ > **Track:** MCP in Action - Consumer Applications
27
+ > **Tags:** `mcp-in-action-track-consumer`
28
+
29
+ CleanCity Agent is an AI-powered web application that helps communities identify, track, and clean up littered areas. Upload an image of trash, get instant analysis, receive cleanup recommendations, and track environmental improvements over time.
30
+
31
+ <p align="center">
32
+ <img src="https://img.shields.io/badge/Python-3.11+-blue.svg" alt="Python 3.11+">
33
+ <img src="https://img.shields.io/badge/Gradio-6.0-orange.svg" alt="Gradio">
34
+ <img src="https://img.shields.io/badge/MCP-Enabled-green.svg" alt="MCP">
35
+ <img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="MIT License">
36
+ </p>
37
+
38
+ ---
39
+
40
+ ## Features
41
+
42
+ ### 🔍 **Smart Trash Detection**
43
+ - Computer vision-powered object detection
44
+ - Identifies common litter types: bottles, bags, wrappers, cigarette butts, etc.
45
+ - Visual bounding boxes with confidence scores
46
+
47
+ ### 📋 **Intelligent Cleanup Planning**
48
+ - Automatic severity assessment (Low/Medium/High)
49
+ - Resource estimation (volunteers, time, equipment)
50
+ - Environmental impact analysis
51
+ - Actionable recommendations
52
+
53
+ ### 📊 **Historical Tracking**
54
+ - SQLite database for event logging
55
+ - Filter by location, date, severity
56
+ - Identify recurring "hotspots"
57
+ - Track cleanup progress over time
58
+
59
+ ### 📄 **Report Generation**
60
+ - Professional reports for city authorities
61
+ - Email-ready templates
62
+ - Multiple formats (Email, Markdown, Plain text)
63
+ - LLM-enhanced descriptions (optional)
64
+
65
+ ### 💬 **AI Chat Assistant**
66
+ - Ask questions about your analysis
67
+ - Get cleanup strategy advice
68
+ - Understand environmental impact
69
+ - Community organizing tips
70
+
71
+ ### 🔌 **MCP Integration**
72
+ - Expose tools via Model Context Protocol
73
+ - Compatible with Claude Desktop and other MCP clients
74
+ - Programmatic access to all features
75
+
76
+ ---
77
+
78
+ ## 🚀 Quick Start
79
+
80
+ ### Prerequisites
81
+
82
+ - **Python 3.11+**
83
+ - **pip** (Python package manager)
84
+ - **git** (optional, for cloning)
85
+
86
+ ### Installation
87
+
88
+ 1. **Clone or download this repository**
89
+ ```bash
90
+ git clone <repository-url>
91
+ cd track2.1
92
+ ```
93
+
94
+ 2. **Create a virtual environment**
95
+ ```bash
96
+ python -m venv .venv
97
+ ```
98
+
99
+ 3. **Activate the virtual environment**
100
+ - Windows (PowerShell):
101
+ ```powershell
102
+ .venv\Scripts\Activate.ps1
103
+ ```
104
+ - Windows (Command Prompt):
105
+ ```cmd
106
+ .venv\Scripts\activate.bat
107
+ ```
108
+ - macOS/Linux:
109
+ ```bash
110
+ source .venv/bin/activate
111
+ ```
112
+
113
+ 4. **Install dependencies**
114
+ ```bash
115
+ pip install -r requirements.txt
116
+ ```
117
+
118
+ 5. **Configure environment (optional)**
119
+ ```bash
120
+ cp .env.example .env   # Windows: copy .env.example .env
121
+ # Edit .env with your API keys if using LLM features
122
+ ```
123
+
124
+ 6. **Run the application**
125
+ ```bash
126
+ python app.py
127
+ ```
128
+
129
+ 7. **Open your browser**
130
+ Navigate to: **http://localhost:7860**
131
+
132
+ ---
133
+
134
+ ## 🎯 User Walkthrough
135
+
136
+ ### Step 1: Upload an Image
137
+ - Click the image upload area or use webcam
138
+ - Select a photo showing trash in streets, parks, or beaches
139
+ - Optionally add location and notes
140
+
141
+ ### Step 2: Analyze
142
+ - Click **"Start Analysis"**
143
+ - AI detects and highlights trash items
144
+ - View detection results with confidence scores
145
+
146
+ ### Step 3: Review Plan
147
+ - Get severity assessment (Low/Medium/High)
148
+ - See volunteer and time estimates
149
+ - Review equipment recommendations
150
+ - Understand environmental impact
151
+
152
+ ### Step 4: Save & Track
153
+ - Events are saved to history (if enabled)
154
+ - View past events in the History tab
155
+ - Identify hotspots in the Hotspots tab
156
+
157
+ ### Step 5: Take Action
158
+ - Copy the generated email report
159
+ - Send to city environmental department
160
+ - Share with community cleanup groups
161
+ - Organize volunteers and execute cleanup
162
+
163
+ ---
164
+
165
+ ## 🏗️ Architecture
166
+
167
+ ### Project Structure
168
+
169
+ ```
170
+ track2.1/
171
+ ├── app.py # Main Gradio UI application
172
+ ├── mcp_server.py # MCP server for tool exposure
173
+ ├── llm_client.py # LLM abstraction layer
174
+ ├── trash_model.py # Trash detection model wrapper
175
+ ├── agents/
176
+ │ └── planner_agent.py # Cleanup workflow orchestrator
177
+ ├── tools/
178
+ │ ├── trash_detection_tool.py # Detection MCP tool
179
+ │ ├── cleanup_planner_tool.py # Planning logic
180
+ │ ├── history_tool.py # Event logging & querying
181
+ │ └── report_generator_tool.py # Report generation
182
+ ├── data/
183
+ │ └── trash_events.db # SQLite database (auto-created)
184
+ ├── Weights/
185
+ │ └── best.pt # Model weights (for real model)
186
+ ├── requirements.txt # Python dependencies
187
+ ├── .env.example # Environment template
188
+ └── README.md # This file
189
+ ```
190
+
191
+ ### Technology Stack
192
+
193
+ - **Frontend**: Gradio 5.x (web UI framework)
194
+ - **AI/ML**: Pluggable detection model (currently mock)
195
+ - **MCP**: FastMCP for tool exposure
196
+ - **LLM**: Multi-provider support (Anthropic, OpenAI, Gemini)
197
+ - **Database**: SQLite (local, file-based)
198
+ - **Image Processing**: Pillow (PIL)
199
+
200
+ ---
201
+
202
+ ## 🔧 Configuration
203
+
204
+ ### LLM Providers
205
+
206
+ CleanCity Agent works **offline by default** with mock responses. To enable real LLM capabilities:
207
+
208
+ 1. Copy `.env.example` to `.env`
209
+ 2. Set `LLM_PROVIDER` to your preferred provider:
210
+ - `anthropic` - Claude (recommended)
211
+ - `openai` - GPT-4
212
+ - `gemini` - Google Gemini
213
+ - `offline` - Mock responses (no API key needed)
214
+
215
+ 3. Add your API key:
216
+ ```env
217
+ LLM_PROVIDER=anthropic
218
+ ANTHROPIC_API_KEY=sk-ant-your-key-here
219
+ ```
220
+
221
+ ### Trash Detection Model
222
+
223
+ The current implementation uses a **mock detector** for demonstration. To integrate a real model:
224
+
225
+ 1. **Option A: Use existing weights** (Weights/best.pt)
226
+ - If you have a YOLOv8/YOLOv5 model:
227
+ ```python
228
+ from ultralytics import YOLO
229
+ model = YOLO("Weights/best.pt")
230
+ ```
231
+ - Update `trash_model.py` with real inference code
232
+
233
+ 2. **Option B: Hugging Face model**
234
+ ```python
235
+ from transformers import AutoModelForObjectDetection
236
+ model = AutoModelForObjectDetection.from_pretrained("model-name")
237
+ ```
238
+
239
+ 3. **Option C: External API**
240
+ - Connect to Roboflow, Hugging Face Inference API, etc.
241
+
242
+ See `trash_model.py` for integration points and TODOs.
243
+
244
+ ---
245
+
246
+ ## 🐳 Deployment
247
+
248
+ ### Local Deployment
249
+ Already covered in Quick Start section above.
250
+
251
+ ### Hugging Face Spaces
252
+
253
+ 1. Create a new Space at https://huggingface.co/spaces
254
+ 2. Select "Gradio" as the SDK
255
+ 3. Upload all project files
256
+ 4. Add secrets for API keys in Space settings
257
+ 5. Space will auto-deploy from `app.py`
258
+
259
+ ### Docker (Manual)
260
+
261
+ ```dockerfile
262
+ FROM python:3.11-slim
263
+
264
+ WORKDIR /app
265
+ COPY requirements.txt .
266
+ RUN pip install -r requirements.txt
267
+
268
+ COPY . .
269
+
270
+ EXPOSE 7860
271
+ CMD ["python", "app.py"]
272
+ ```
273
+
274
+ Build and run:
275
+ ```bash
276
+ docker build -t cleancity-agent .
277
+ docker run -p 7860:7860 cleancity-agent
278
+ ```
279
+
280
+ ---
281
+
282
+ ## 🛠️ MCP Server Usage
283
+
284
+ The MCP server exposes all tools for programmatic access:
285
+
286
+ ### Running the MCP Server
287
+
288
+ ```bash
289
+ python mcp_server.py
290
+ ```
291
+
292
+ ### Available Tools
293
+
294
+ 1. **detect_trash** - Detect trash in images
295
+ 2. **plan_cleanup** - Generate cleanup plans
296
+ 3. **log_event** - Save events to database
297
+ 4. **query_events** - Search historical events
298
+ 5. **get_hotspots** - Identify recurring problem areas
299
+ 6. **generate_report** - Create formatted reports
300
+ 7. **mark_cleaned** - Update event status
301
+
302
+ ### Claude Desktop Integration
303
+
304
+ Add to your Claude Desktop configuration (`claude_desktop_config.json`):
305
+
306
+ ```json
307
+ {
308
+ "mcpServers": {
309
+ "cleancity": {
310
+ "command": "python",
311
+ "args": ["C:\\path\\to\\track2.1\\mcp_server.py"]
312
+ }
313
+ }
314
+ }
315
+ ```
316
+
317
+ ---
318
+
319
+ ## 📊 Database Schema
320
+
321
+ SQLite database (`data/trash_events.db`) with the following schema:
322
+
323
+ ```sql
324
+ CREATE TABLE events (
325
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
326
+ timestamp TEXT NOT NULL,
327
+ location TEXT,
328
+ latitude REAL,
329
+ longitude REAL,
330
+ severity TEXT NOT NULL,
331
+ trash_count INTEGER NOT NULL,
332
+ categories TEXT NOT NULL, -- JSON array
333
+ detections_json TEXT NOT NULL, -- JSON array
334
+ notes TEXT,
335
+ image_path TEXT,
336
+ cleaned BOOLEAN DEFAULT 0,
337
+ created_at TEXT DEFAULT CURRENT_TIMESTAMP
338
+ );
339
+ ```
340
+
341
+ ---
342
+
343
+ ## 🤝 Contributing
344
+
345
+ Contributions are welcome! Areas for improvement:
346
+
347
+ - **Real trash detection model** integration
348
+ - **GPS/mapping** features for hotspot visualization
349
+ - **Multi-user support** with authentication
350
+ - **Mobile app** wrapper (React Native, Flutter)
351
+ - **Gamification** (points, badges for cleanups)
352
+ - **Social sharing** features
353
+ - **Volunteer coordination** tools
354
+
355
+ Please open an issue or PR on the repository.
356
+
357
+ ---
358
+
359
+ ## ⚠️ Limitations
360
+
361
+ - **Mock detection**: Currently uses random detections for demonstration
362
+ - **Local storage**: Data stored locally, not synchronized
363
+ - **No authentication**: Single-user design
364
+ - **Detection accuracy**: Depends on image quality and model training
365
+ - **LLM costs**: Using real LLM APIs incurs API charges
366
+
367
+ This is a **prototype** designed for community groups and individual activists. Production deployment requires additional hardening.
368
+
369
+ ---
370
+
371
+ ## 📜 License
372
+
373
+ MIT License - see LICENSE file for details.
374
+
375
+ ---
376
+
377
+ ## 🙏 Acknowledgments
378
+
379
+ - Built with [Gradio](https://gradio.app/)
380
+ - Powered by [Model Context Protocol (MCP)](https://modelcontextprotocol.io/)
381
+ - LLM support via Anthropic, OpenAI, and Google
382
+ - Inspired by community environmental activists worldwide
383
+
384
+ ---
385
+
386
+ ## 📧 Support
387
+
388
+ For questions, issues, or feature requests:
389
+ - Open an issue on GitHub
390
+ - Check the FAQ in the app's "How It Works" tab
391
+ - Review the inline code documentation
392
+
393
+ ---
394
+
395
+ **Let's make our cities cleaner, together! 🌍♻️**
agents/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ """Agents package for CleanCity Agent."""
2
+
3
+ from .planner_agent import run_cleanup_workflow
4
+
5
+ __all__ = ["run_cleanup_workflow"]
agents/planner_agent.py ADDED
@@ -0,0 +1,244 @@
1
+ """
2
+ Cleanup Planning Agent
3
+
4
+ Orchestrates the full workflow from image analysis to actionable cleanup plans.
5
+ """
6
+
7
+ from typing import Optional
8
+ from PIL import Image
9
+ import base64
10
+ from io import BytesIO
11
+
12
+ from tools.trash_detection_tool import detect_trash_mcp, format_detections_for_display
13
+ from tools.cleanup_planner_tool import plan_cleanup
14
+ from tools.history_tool import log_event, get_hotspots
15
+ from tools.report_generator_tool import generate_report
16
+ from trash_model import Detection
17
+
18
+
19
+ def run_cleanup_workflow(
20
+ image: Image.Image,
21
+ location: Optional[str] = None,
22
+ notes: Optional[str] = None,
23
+ save_to_history: bool = True,
24
+ use_llm_enhancement: bool = False,
25
+ latitude: Optional[float] = None,
26
+ longitude: Optional[float] = None
27
+ ):
28
+ """
29
+ Execute the complete cleanup workflow for a trash detection event.
30
+
31
+ This is the main orchestration function that:
32
+ 1. Detects trash in the image
33
+ 2. Plans cleanup actions
34
+ 3. Optionally logs to history
35
+ 4. Generates reports
36
+ 5. Returns all results for UI display
37
+
38
+ Args:
39
+ image: PIL Image to analyze
40
+ location: Optional location description
41
+ notes: Optional user notes
42
+ save_to_history: Whether to log event to database
43
+ use_llm_enhancement: Whether to use LLM for enhanced text generation
44
+ latitude: Optional GPS latitude
45
+ longitude: Optional GPS longitude
46
+
47
+ Returns:
48
+ Dict containing:
49
+ - detection_results: Raw detection data
50
+ - plan: Cleanup plan
51
+ - report: Generated report text
52
+ - event_id: Database ID (if saved)
53
+ - visualization_data: Data for UI overlay
54
+ - summary: Human-readable summary
55
+ """
56
+ # Step 1: Detect trash
57
+ print("🔍 Step 1: Detecting trash in image...")
58
+
59
+ # Convert image to base64 for tool (simulating MCP data format)
60
+ buffered = BytesIO()
61
+ image.save(buffered, format="PNG")
62
+ img_b64 = base64.b64encode(buffered.getvalue()).decode()
63
+
64
+ detection_result = detect_trash_mcp(img_b64)
65
+ detections: list[Detection] = detection_result["detections"]
66
+
67
+ print(f" Found {detection_result['count']} items")
68
+
69
+ if not detections:
70
+ return {
71
+ "detection_results": detection_result,
72
+ "plan": None,
73
+ "report": None,
74
+ "event_id": None,
75
+ "visualization_data": None,
76
+ "summary": "No trash detected in this image. The area appears clean!",
77
+ "status": "no_trash"
78
+ }
79
+
80
+ # Step 2: Plan cleanup
81
+ print("📋 Step 2: Planning cleanup actions...")
82
+ plan = plan_cleanup(
83
+ detections,
84
+ location=location,
85
+ notes=notes,
86
+ use_llm=use_llm_enhancement
87
+ )
88
+ print(f" Severity: {plan['severity']}, Volunteers: {plan['recommended_volunteers']}")
89
+
90
+ # Step 3: Log to history (if requested)
91
+ event_id = None
92
+ if save_to_history:
93
+ print("💾 Step 3: Logging event to history...")
94
+ log_result = log_event(
95
+ detections=detections,
96
+ severity=plan["severity"],
97
+ location=location,
98
+ notes=notes,
99
+ image_path=None, # Could save image file here
100
+ latitude=latitude,
101
+ longitude=longitude
102
+ )
103
+ event_id = log_result["event_id"]
104
+ print(f" Saved as event #{event_id}")
105
+
106
+ # Step 4: Generate report
107
+ print("📄 Step 4: Generating report...")
108
+ report_result = generate_report(
109
+ detections=detections,
110
+ severity=plan["severity"],
111
+ location=location,
112
+ notes=notes,
113
+ event_id=event_id,
114
+ plan=plan,
115
+ format="email"
116
+ )
117
+ report_text = report_result["report"]
118
+
119
+ # Step 5: Prepare visualization data
120
+ visualization_data = _prepare_visualization_data(image, detections)
121
+
122
+ # Step 6: Create summary
123
+ summary = _create_workflow_summary(detection_result, plan, event_id)
124
+
125
+ print("✅ Workflow complete!")
126
+
127
+ return {
128
+ "detection_results": detection_result,
129
+ "plan": plan,
130
+ "report": report_text,
131
+ "event_id": event_id,
132
+ "visualization_data": visualization_data,
133
+ "summary": summary,
134
+ "status": "success"
135
+ }
136
+
137
+
138
+ def _prepare_visualization_data(image: Image.Image, detections: list[Detection]):
139
+ """Prepare data for drawing bounding boxes on image."""
140
+ return {
141
+ "image_size": {"width": image.width, "height": image.height},
142
+ "boxes": [
143
+ {
144
+ "bbox": det["bbox"],
145
+ "label": det["label"],
146
+ "score": det["score"],
147
+ "color": _get_color_for_label(det["label"])
148
+ }
149
+ for det in detections
150
+ ]
151
+ }
152
+
153
+
154
+ def _get_color_for_label(label: str) -> str:
155
+ """Get consistent color for trash category."""
156
+ color_map = {
157
+ "plastic_bottle": "#FF6B6B",
158
+ "plastic_bag": "#4ECDC4",
159
+ "food_wrapper": "#FFD93D",
160
+ "cigarette_butt": "#95E1D3",
161
+ "paper_cup": "#F38181",
162
+ "aluminum_can": "#AA96DA",
163
+ "food_container": "#FCBAD3",
164
+ "cardboard_box": "#A8D8EA",
165
+ "glass_bottle": "#FFA07A",
166
+ "other_trash": "#B0B0B0"
167
+ }
168
+ return color_map.get(label, "#FF0000")
169
+
170
+
171
+ def _create_workflow_summary(
172
+ detection_result,
173
+ plan,
174
+ event_id: Optional[int]
175
+ ) -> str:
176
+ """Create human-readable workflow summary."""
177
+ count = detection_result["count"]
178
+ categories = detection_result["categories"]
179
+ severity = plan["severity"]
180
+ volunteers = plan["recommended_volunteers"]
181
+ time = plan["estimated_time_minutes"]
182
+
183
+ category_text = ", ".join(categories[:3])
184
+ if len(categories) > 3:
185
+ category_text += f" and {len(categories) - 3} more"
186
+
187
+ summary = f"""**Analysis Complete**
188
+
189
+ Detected **{count} trash items** across {len(categories)} categories ({category_text}).
190
+
191
+ **Severity:** {severity.upper()}
192
+
193
+ **Recommended Action:**
194
+ - {volunteers} volunteer(s) needed
195
+ - Approximately {time} minutes
196
+ - Action within {plan['urgency_days']} day(s)
197
+ """
198
+
199
+ if event_id:
200
+ summary += f"\n✓ Event saved to history (ID: {event_id})"
201
+
202
+ return summary
203
+
204
+
205
+ def analyze_hotspots(days: int = 30):
206
+ """
207
+ Analyze trash hotspots from historical data.
208
+
209
+ Args:
210
+ days: Time window for analysis
211
+
212
+ Returns:
213
+ Hotspot analysis with recommendations
214
+ """
215
+ hotspots_data = get_hotspots(min_events=2, days=days)
216
+
217
+ if not hotspots_data["hotspots"]:
218
+ return {
219
+ "hotspots": [],
220
+ "message": f"No recurring hotspots found in the last {days} days.",
221
+ "recommendation": "Continue monitoring and logging new events."
222
+ }
223
+
224
+ # Analyze hotspots
225
+ top_hotspot = hotspots_data["hotspots"][0]
226
+
227
+ recommendation = f"""**Hotspot Alert**
228
+
229
+ {hotspots_data['count']} location(s) with recurring trash issues identified.
230
+
231
+ **Top Problem Area:** {top_hotspot['location']}
232
+ - {top_hotspot['event_count']} events recorded
233
+ - {top_hotspot['total_trash']} total items
234
+ - Last event: {top_hotspot['last_event']}
235
+
236
+ **Recommendation:** Consider setting up regular cleanup schedule or requesting permanent waste receptacles for this location.
237
+ """
238
+
239
+ return {
240
+ "hotspots": hotspots_data["hotspots"],
241
+ "count": hotspots_data["count"],
242
+ "recommendation": recommendation,
243
+ "top_hotspot": top_hotspot
244
+ }
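+
+
+ # Example usage (an illustrative sketch; the image path and location are hypothetical):
+ #
+ #   from PIL import Image
+ #   from agents.planner_agent import run_cleanup_workflow
+ #
+ #   img = Image.open("street_photo.jpg")
+ #   result = run_cleanup_workflow(img, location="Riverside Park", save_to_history=False)
+ #   print(result["summary"])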
app.py CHANGED
@@ -1,503 +1,646 @@
1
- """
2
- CleanCity Agent - Gradio Web Application
3
-
4
- Modern web interface for trash detection and cleanup planning.
5
- Built with Gradio 5.x with latest features.
6
- """
7
-
8
- import gradio as gr
9
- from PIL import Image, ImageDraw, ImageFont
10
- import io
11
- import base64
12
- from datetime import datetime
13
- from typing import Optional, Tuple
14
- import logging
15
-
16
- from trash_model import detect_trash, get_model_info
17
- from llm_client import get_llm_client
18
-
19
- # Configure logging
20
- logging.basicConfig(level=logging.INFO)
21
- logger = logging.getLogger(__name__)
22
-
23
- # ============================================================================
24
- # UI CONSTANTS
25
- # ============================================================================
26
-
27
- TITLE = "🌍 CleanCity Agent"
28
- DESCRIPTION = """
29
- **AI-Powered Trash Detection & Cleanup Planner**
30
-
31
- Upload an image to detect trash, get cleanup recommendations, and track environmental improvements.
32
- """
33
-
34
- CSS = """
35
- .primary-btn {
36
- background: linear-gradient(90deg, #10b981 0%, #059669 100%) !important;
37
- border: none !important;
38
- }
39
- .secondary-btn {
40
- background: linear-gradient(90deg, #3b82f6 0%, #2563eb 100%) !important;
41
- border: none !important;
42
- }
43
- .container {
44
- max-width: 1200px;
45
- margin: auto;
46
- }
47
- .info-box {
48
- background: #f0fdf4;
49
- border-left: 4px solid #10b981;
50
- padding: 1rem;
51
- margin: 1rem 0;
52
- border-radius: 0.5rem;
53
- }
54
- """
55
-
56
- # ============================================================================
57
- # HELPER FUNCTIONS
58
- # ============================================================================
59
-
60
- def draw_detections(image: Image.Image, detections: list) -> Image.Image:
61
- """
62
- Draw bounding boxes and labels on image.
63
-
64
- Args:
65
- image: Original PIL Image
66
- detections: List of Detection objects
67
-
68
- Returns:
69
- Image with drawn annotations
70
- """
71
- # Create a copy
72
- img_copy = image.copy()
73
- draw = ImageDraw.Draw(img_copy)
74
-
75
- # Try to load a font, fallback to default
76
- try:
77
- font = ImageFont.truetype("arial.ttf", 20)
78
- except:
79
- font = ImageFont.load_default()
80
-
81
- # Color palette for different classes
82
- colors = [
83
- "#ef4444", "#f59e0b", "#10b981", "#3b82f6",
84
- "#8b5cf6", "#ec4899", "#06b6d4", "#84cc16"
85
- ]
86
-
87
- for i, det in enumerate(detections):
88
- bbox = det["bbox"]
89
- label = det["label"]
90
- score = det["score"]
91
-
92
- # Get color for this class
93
- color = colors[det.get("class_id", i) % len(colors)]
94
-
95
- # Draw bounding box
96
- draw.rectangle(bbox, outline=color, width=3)
97
-
98
- # Draw label background
99
- text = f"{label} {score:.2f}"
100
- text_bbox = draw.textbbox((bbox[0], bbox[1] - 25), text, font=font)
101
- draw.rectangle(text_bbox, fill=color)
102
-
103
- # Draw label text
104
- draw.text((bbox[0], bbox[1] - 25), text, fill="white", font=font)
105
-
106
- return img_copy
107
-
108
-
109
- def analyze_severity(detections: list) -> str:
110
- """Determine cleanup severity based on detections."""
111
- count = len(detections)
112
- if count == 0:
113
- return "None"
114
- elif count <= 3:
115
- return "Low"
116
- elif count <= 8:
117
- return "Medium"
118
- else:
119
- return "High"
120
-
121
-
122
- def estimate_resources(detections: list, severity: str) -> dict:
123
- """Estimate cleanup resources needed."""
124
- count = len(detections)
125
-
126
- if severity == "Low":
127
- volunteers = "1-2"
128
- time = "30 mins - 1 hour"
129
- elif severity == "Medium":
130
- volunteers = "3-5"
131
- time = "1-2 hours"
132
- else:
133
- volunteers = "6-10"
134
- time = "2-4 hours"
135
-
136
- equipment = [
137
- "Heavy-duty trash bags",
138
- "Gloves",
139
- "Grabber tools",
140
- "Safety vests"
141
- ]
142
-
143
- if count > 5:
144
- equipment.append("Wheelbarrow or cart")
145
-
146
- return {
147
- "volunteers": volunteers,
148
- "time": time,
149
- "equipment": equipment
150
- }
151
-
152
-
153
- # ============================================================================
154
- # CORE ANALYSIS FUNCTION
155
- # ============================================================================
156
-
157
- def analyze_image(image: Optional[Image.Image]) -> Tuple[Optional[Image.Image], str, str]:
158
- """
159
- Analyze uploaded image for trash.
160
-
161
- Args:
162
- image: PIL Image from user
163
-
164
- Returns:
165
- Tuple of (annotated_image, analysis_text, status_message)
166
- """
167
- if image is None:
168
- return None, "", "⚠️ Please upload an image first"
169
-
170
- try:
171
- # Run trash detection
172
- logger.info("Running trash detection...")
173
- detections = detect_trash(image)
174
-
175
- # Draw detections on image
176
- annotated_image = draw_detections(image, detections)
177
-
178
- # Analyze results
179
- count = len(detections)
180
- severity = analyze_severity(detections)
181
-
182
- if count == 0:
183
- analysis = "✅ **No trash detected!** This area looks clean."
184
- status = "✅ Analysis complete - No trash found"
185
- else:
186
- # Get resource estimates
187
- resources = estimate_resources(detections, severity)
188
-
189
- # Build analysis report
190
- analysis = f"""
191
- ## 🔍 Detection Results
192
-
193
- **Trash Items Detected:** {count}
194
- **Severity Level:** {severity}
195
-
196
- ### Detected Items:
197
- """
198
- # List detected items
199
- item_counts = {}
200
- for det in detections:
201
- label = det["label"]
202
- item_counts[label] = item_counts.get(label, 0) + 1
203
-
204
- for label, count in item_counts.items():
205
- analysis += f"- **{label}**: {count} item(s)\n"
206
-
207
- analysis += f"""
208
- ---
209
-
210
- ## 📋 Cleanup Plan
211
-
212
- **Volunteers Needed:** {resources['volunteers']} people
213
- **Estimated Time:** {resources['time']}
214
-
215
- **Equipment Required:**
216
- """
217
- for item in resources['equipment']:
218
- analysis += f"- {item}\n"
219
-
220
- analysis += """
221
- ---
222
-
223
- ## 🌱 Environmental Impact
224
-
225
- Cleaning this area will:
226
- - Prevent pollution from entering waterways
227
- - Protect local wildlife from harmful debris
228
- - Improve community health and aesthetics
229
- - Set a positive example for environmental stewardship
230
-
231
- **Recommendation:** Schedule a cleanup event and consider installing waste bins to prevent future littering.
232
- """
233
- status = f"✅ Analysis complete - {count} items detected"
234
-
235
- return annotated_image, analysis, status
236
-
237
- except Exception as e:
238
- logger.error(f"Analysis failed: {e}")
239
- return None, "", f"❌ Error: {str(e)}"
240
-
241
-
242
- # ============================================================================
243
- # CHAT FUNCTION
244
- # ============================================================================
245
-
246
- def chat_response(message: str, history: list) -> list:
247
- """
248
- Handle chat interactions.
249
-
250
- Args:
251
- message: User message
252
- history: Chat history (list of message dicts)
253
-
254
- Returns:
255
- Updated chat history
256
- """
257
- if not message.strip():
258
- history.append({
259
- "role": "assistant",
260
- "content": "Please ask a question about trash cleanup or environmental impact."
261
- })
262
- return history
263
-
264
- try:
265
- # Add user message to history
266
- history.append({"role": "user", "content": message})
267
-
268
- # Get LLM client
269
- llm = get_llm_client()
270
-
271
- # System prompt for chat
272
- system_prompt = """You are an environmental assistant helping with trash cleanup planning.
273
- Provide helpful, practical advice about:
274
- - Cleanup strategies and best practices
275
- - Environmental impact of different types of trash
276
- - Resource estimation and volunteer coordination
277
- - Community organizing tips
278
- - Waste prevention strategies
279
-
280
- Keep responses concise and actionable."""
281
-
282
- # Generate response
283
- response = llm.generate(
284
- message,
285
- system_prompt=system_prompt,
286
- max_tokens=500,
287
- temperature=0.7
288
- )
289
-
290
- # Add assistant response to history
291
- history.append({"role": "assistant", "content": response})
292
-
293
- return history
294
-
295
- except Exception as e:
296
- logger.error(f"Chat error: {e}")
297
- history.append({
298
- "role": "assistant",
299
- "content": f"Sorry, I encountered an error: {str(e)}"
300
- })
301
- return history
302
-
303
-
304
- # ============================================================================
305
- # GRADIO INTERFACE
306
- # ============================================================================
307
-
308
- def create_app() -> gr.Blocks:
309
- """Create the Gradio application."""
310
-
311
- with gr.Blocks(
312
- title=TITLE,
313
- css=CSS,
314
- theme=gr.themes.Soft(
315
- primary_hue="green",
316
- secondary_hue="blue"
317
- )
318
- ) as app:
319
-
320
- # Header
321
- gr.Markdown(f"# {TITLE}")
322
- gr.Markdown(DESCRIPTION)
323
-
324
- # Model info
325
- model_info = get_model_info()
326
- llm_info = get_llm_client().get_info()
327
-
328
- with gr.Accordion("ℹ️ System Information", open=False):
329
- gr.Markdown(f"""
330
- **YOLO Model:** {'✅ Loaded' if model_info['loaded'] else '❌ Not loaded'}
331
- **Detection Classes:** {model_info['num_classes']} ({', '.join(list(model_info['classes'].values())[:10])})
332
- **LLM Provider:** {llm_info['provider']} {'(online)' if llm_info['online'] else '(offline)'}
333
- **LLM Model:** {llm_info['model'] or 'Mock responses'}
334
- """)
335
-
336
- # Main content tabs
337
- with gr.Tabs():
338
-
339
- # Tab 1: Image Analysis
340
- with gr.Tab("📸 Analyze Image"):
341
- gr.Markdown("### Upload an image to detect trash and get cleanup recommendations")
342
-
343
- with gr.Row():
344
- with gr.Column(scale=1):
345
- input_image = gr.Image(
346
- label="Upload Image",
347
- type="pil",
348
- sources=["upload", "webcam"]
349
- )
350
-
351
- analyze_btn = gr.Button(
352
- "🔍 Analyze Image",
353
- variant="primary",
354
- elem_classes=["primary-btn"]
355
- )
356
-
357
- status_text = gr.Textbox(
358
- label="Status",
359
- interactive=False,
360
- lines=1
361
- )
362
-
363
- with gr.Column(scale=1):
364
- output_image = gr.Image(
365
- label="Detection Results",
366
- type="pil"
367
- )
368
-
369
- with gr.Row():
370
- analysis_output = gr.Markdown(label="Analysis Report")
371
-
372
- # Wire up the analysis button
373
- analyze_btn.click(
374
- fn=analyze_image,
375
- inputs=[input_image],
376
- outputs=[output_image, analysis_output, status_text]
377
- )
378
-
379
- # Tab 2: AI Chat
380
- with gr.Tab("💬 AI Assistant"):
381
- gr.Markdown("### Ask questions about cleanup planning and environmental impact")
382
-
383
- chatbot = gr.Chatbot(
384
- label="Chat with AI Assistant",
385
- height=500,
386
- type="messages"
387
- )
388
-
389
- msg = gr.Textbox(
390
- label="Your message",
391
- placeholder="Ask me about cleanup strategies, environmental impact, or organizing volunteers..."
392
- )
393
-
394
- with gr.Row():
395
- submit = gr.Button("Send", variant="primary", elem_classes=["secondary-btn"])
396
- clear = gr.Button("Clear Chat")
397
-
398
- # Chat interactions
399
- def user_submit(user_message, chat_history):
400
- return "", chat_response(user_message, chat_history)
401
-
402
- submit.click(
403
- fn=user_submit,
404
- inputs=[msg, chatbot],
405
- outputs=[msg, chatbot]
406
- )
407
-
408
- msg.submit(
409
- fn=user_submit,
410
- inputs=[msg, chatbot],
411
- outputs=[msg, chatbot]
412
- )
413
-
414
- clear.click(lambda: [], None, chatbot)
415
-
416
- # Tab 3: About
417
- with gr.Tab("ℹ️ About"):
418
- gr.Markdown("""
419
- ## About CleanCity Agent
420
-
421
- CleanCity Agent is an AI-powered platform for environmental cleanup planning.
422
-
423
- ### Features
424
-
425
- 🔍 **Smart Trash Detection**
426
- - Computer vision using YOLOv8/v11
427
- - Identifies multiple trash categories
428
- - Visual bounding boxes with confidence scores
429
-
430
- 📋 **Intelligent Cleanup Planning**
431
- - Automatic severity assessment
432
- - Resource estimation
433
- - Environmental impact analysis
434
-
435
- 💬 **AI Chat Assistant**
436
- - Get cleanup strategy advice
437
- - Learn about environmental impact
438
- - Community organizing tips
439
-
440
- ### Technology Stack
441
-
442
- - **Computer Vision:** Ultralytics YOLO
443
- - **LLM Providers:** Anthropic Claude, OpenAI GPT-4, Google Gemini
444
- - **Web Framework:** Gradio 5.x
445
- - **Model Context Protocol:** MCP integration
446
-
447
- ### Privacy
448
-
449
- All image processing happens locally. No images are uploaded to external servers
450
- (except LLM API calls if configured). Your privacy is our priority.
451
-
452
- ### Get Started
453
-
454
- 1. Upload an image using the "Analyze Image" tab
455
- 2. Click "Analyze Image" to detect trash
456
- 3. Review the cleanup plan and recommendations
457
- 4. Ask questions in the "AI Assistant" tab
458
-
459
- ---
460
-
461
- Built with ❤️ for a cleaner planet 🌍
462
- """)
463
-
464
- # Footer
465
- gr.Markdown("""
466
- ---
467
- 💡 **Tip:** Works best with clear images showing trash items. Try different angles for better detection!
468
- """)
469
-
470
- return app
471
-
472
-
473
- # ============================================================================
474
- # MAIN APPLICATION
475
- # ============================================================================
476
-
477
- if __name__ == "__main__":
478
- """Launch the Gradio application."""
479
-
480
- print("=" * 70)
481
- print("🌍 CleanCity Agent - Starting...")
482
- print("=" * 70)
483
-
484
- # Initialize LLM client
485
- print("\n📡 Initializing LLM client...")
486
- get_llm_client()
487
-
488
- # Create and launch app
489
- print("\n🚀 Launching Gradio interface...")
490
- app = create_app()
491
-
492
- # Launch settings from environment
493
- import os
494
- server_name = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1")
495
- server_port = int(os.getenv("GRADIO_SERVER_PORT", "7860"))
496
- share = os.getenv("GRADIO_SHARE", "false").lower() == "true"
497
-
498
- app.launch(
499
- server_name=server_name,
500
- server_port=server_port,
501
- share=share,
502
- show_api=False
503
- )
 
1
+ """
2
+ CleanCity Agent - Main Gradio Application
3
+
4
+ A user-friendly web interface for trash detection and cleanup planning.
5
+ """
6
+
7
+ import gradio as gr
8
+ from PIL import Image, ImageDraw, ImageFont
9
+ import io
10
+ import base64
11
+ from typing import Optional, Tuple
12
+
13
+ from agents.planner_agent import run_cleanup_workflow, analyze_hotspots
14
+ from tools.history_tool import query_events
15
+ from llm_client import get_llm_client
16
+
17
+
18
+ # ============================================================================
19
+ # UI CONSTANTS & STYLES
20
+ # ============================================================================
21
+
22
+ TITLE = "🌍 CleanCity Agent"
23
+ TAGLINE = "Spot trash. Plan action. Keep your city clean."
24
+
25
+ GUIDE_CONTENT = """
26
+ ## 📖 How to Use CleanCity Agent
27
+
28
+ ### Step 1 – Add a Photo
29
+ Upload a picture of a street, beach, park, or any place where there might be trash.
30
+ You can use your device's camera or select an existing image.
31
+
32
+ **Pro Tip:** Click the **📍 Get GPS** button to automatically capture your current location!
33
+
34
+ ### Step 2 – Let the AI Spot the Trash
35
+ Click **"Start Analysis"**. Our AI will:
36
+ - Identify trash items in your image using your trained YOLO model
37
+ - Draw bounding boxes around detected objects
38
+ - Classify the type of trash found
39
+
40
+ ### Step 3 – Review the Cleanup Plan
41
+ Get an instant assessment including:
42
+ - **Severity level** (Low/Medium/High)
43
+ - Number of volunteers needed
44
+ - Estimated cleanup time
45
+ - Required equipment list
46
+ - Environmental impact summary
47
+
48
+ ### Step 4 – Save and Track
49
+ Save this event with a location to:
50
+ - Track trash patterns over time
51
+ - Identify recurring problem areas ("hotspots")
52
+ - Build evidence for city officials
53
+
54
+ ### Step 5 – Share or Report
55
+ Use the generated report to:
56
+ - Contact your city's environmental department
57
+ - Organize community cleanup events
58
+ - Document progress for grants or awareness campaigns
59
+
60
+ ---
61
+
62
+ ### 💡 Tips
63
+ - **Better photos = better detection**: Take clear, well-lit images
64
+ - **Add location details**: Helps track hotspots and patterns
65
+ - **Check History tab**: See trends and recurring problem areas
66
+ - **Chat with the agent**: Ask questions about your analysis
67
+
68
+ ### ⚠️ Limitations
69
+ This is an AI-powered prototype. Detection accuracy depends on image quality
70
+ and lighting conditions. Always verify results visually.
71
+ """
72
+
73
+ FAQ_CONTENT = """
74
+ ## ❓ Frequently Asked Questions
75
+
76
+ **Q: How does the trash detection work?**
77
+ A: We use computer vision models trained to recognize common litter items like
78
+ plastic bottles, bags, food wrappers, cigarette butts, and more.
79
+
80
+ **Q: Is my data stored or shared?**
81
+ A: All data is stored locally in your instance. We don't upload images or
82
+ personal information to external servers (except LLM API calls if you configure them).
83
+
84
+ **Q: What should I do if detection is inaccurate?**
85
+ A: The mock model provides random detections for demonstration. Replace with a
86
+ real model by updating `trash_model.py`. You can also add notes to manually
87
+ correct assessments.
88
+
89
+ **Q: Can I use this for large-scale city monitoring?**
90
+ A: This is a prototype designed for community groups and individual activists.
91
+ For large-scale deployment, consider:
92
+ - Integrating a production-grade detection model
93
+ - Setting up cloud hosting for data persistence
94
+ - Adding user authentication and role management
95
+
96
+ **Q: How can I contribute or report issues?**
97
+ A: Check the project repository for contribution guidelines and issue tracking.
98
+ """
99
+
100
+
101
+ # ============================================================================
102
+ # IMAGE PROCESSING FUNCTIONS
103
+ # ============================================================================
104
+
105
+ def draw_boxes_on_image(image: Image.Image, detections: list) -> Image.Image:
106
+ """Draw bounding boxes and labels on image."""
107
+ if not detections:
108
+ return image
109
+
110
+ img_copy = image.copy()
111
+ draw = ImageDraw.Draw(img_copy)
112
+
113
+ # Try to load a font, fall back to default if unavailable
114
+ try:
115
+ font = ImageFont.truetype("arial.ttf", 16)
116
+ except Exception:  # font unavailable; use Pillow's default
117
+ font = ImageFont.load_default()
118
+
119
+ for det in detections:
120
+ bbox = det["bbox"]
121
+ label = det["label"].replace("_", " ").title()
122
+ score = det["score"]
123
+
124
+ # Draw rectangle
125
+ draw.rectangle(bbox, outline="red", width=3)
126
+
127
+ # Draw label background
128
+ text = f"{label} ({score:.0%})"
129
+
130
+ # Get text bounding box for background
131
+ try:
132
+ text_bbox = draw.textbbox((bbox[0], bbox[1] - 20), text, font=font)
133
+ draw.rectangle(text_bbox, fill="red")
134
+ draw.text((bbox[0], bbox[1] - 20), text, fill="white", font=font)
135
+ except Exception:  # textbbox missing in older Pillow
136
+ # Fallback for older Pillow versions
137
+ draw.text((bbox[0], bbox[1] - 20), text, fill="red", font=font)
138
+
139
+ return img_copy
140
+
141
+
142
+ def image_to_base64(image: Image.Image) -> str:
143
+ """Convert PIL Image to base64 string."""
144
+ buffered = io.BytesIO()
145
+ image.save(buffered, format="PNG")
146
+ return base64.b64encode(buffered.getvalue()).decode()
147
+
148
+
149
+ # ============================================================================
150
+ # CORE ANALYSIS FUNCTION
151
+ # ============================================================================
152
+
153
+ def analyze_image(
154
+ image: Optional[Image.Image],
155
+ location: str,
156
+ notes: str,
157
+ save_to_history: bool,
158
+ gps_coords: str
159
+ ) -> Tuple[Optional[Image.Image], str, str, str]:
160
+ """
161
+ Main analysis function called when user clicks "Start Analysis".
162
+
163
+ Returns:
164
+ - annotated_image: Image with bounding boxes
165
+ - detection_text: Detection results summary
166
+ - plan_text: Cleanup plan
167
+ - report_text: Generated report
168
+ """
169
+ if image is None:
170
+ return None, "⚠️ Please upload an image first.", "", ""
171
+
172
+ # Parse GPS coordinates if provided
173
+ latitude, longitude = None, None
174
+ if gps_coords and gps_coords.strip():
175
+ try:
176
+ parts = gps_coords.split(',')
177
+ if len(parts) == 2:
178
+ latitude = float(parts[0].strip())
179
+ longitude = float(parts[1].strip())
180
+ except ValueError:
181
+ pass # Invalid format, continue without coords
182
+
183
+ try:
184
+ # Run the full workflow
185
+ result = run_cleanup_workflow(
186
+ image=image,
187
+ location=location if location.strip() else None,
188
+ notes=notes if notes.strip() else None,
189
+ save_to_history=save_to_history,
190
+ use_llm_enhancement=False, # Can make this a checkbox
191
+ latitude=latitude,
192
+ longitude=longitude
193
+ )
194
+
195
+ if result["status"] == "no_trash":
196
+ return image, result["summary"], "", ""
197
+
198
+ # Draw boxes on image
199
+ annotated_image = draw_boxes_on_image(
200
+ image,
201
+ result["detection_results"]["detections"]
202
+ )
203
+
204
+ # Format detection results
205
+ detection_text = f"""### 🔍 Detection Results
206
+
207
+ {result['detection_results']['summary']}
208
+
209
+ **Items Detected:**
210
+ """
211
+ for det in result["detection_results"]["detections"]:
212
+ label = det["label"].replace("_", " ").title()
213
+ detection_text += f"- {label} (confidence: {det['score']:.0%})\n"
214
+
215
+ # Format plan
216
+ plan = result["plan"]
217
+ plan_text = f"""### 📋 Cleanup Plan
218
+
219
+ **Severity Level:** {plan['severity'].upper()}
220
+
221
+ **Resources Needed:**
222
+ - 👥 Volunteers: {plan['recommended_volunteers']}
223
+ - ⏱️ Estimated Time: {plan['estimated_time_minutes']} minutes
224
+ - 📅 Urgency: Within {plan['urgency_days']} day(s)
225
+
226
+ **Equipment:**
227
+ """
228
+ for item in plan['equipment_needed']:
229
+ plan_text += f"- {item}\n"
230
+
231
+ plan_text += f"\n**Environmental Impact:**\n{plan['environmental_impact']}\n"
232
+
233
+ if result.get("event_id"):
234
+ plan_text += f"\n✅ Saved! ID: {result['event_id']}"
235
+
236
+ # Return report
237
+ report_text = result["report"]
238
+
239
+ return annotated_image, detection_text, plan_text, report_text
240
+
241
+ except Exception as e:
242
+ error_msg = f"❌ Error during analysis: {str(e)}"
243
+ return image, error_msg, "", ""
244
+
245
+
246
+ # ============================================================================
247
+ # HISTORY & HOTSPOT FUNCTIONS
248
+ # ============================================================================
249
+
250
+ def load_history(days_filter: int, location_filter: str, severity_filter: str) -> str:
251
+ """Load and format event history."""
252
+ try:
253
+ # Apply filters
254
+ query_params = {"days": days_filter if days_filter > 0 else None}
255
+
256
+ if location_filter.strip():
257
+ query_params["location"] = location_filter.strip()
258
+
259
+ if severity_filter != "All":
260
+ query_params["severity"] = severity_filter.lower()
261
+
262
+ result = query_events(**query_params)
263
+
264
+ if not result["events"]:
265
+ return "No events found matching your filters."
266
+
267
+ # Format output
268
+ output = f"""### 📊 Event History
269
+
270
+ **Summary:**
271
+ - Total events: {result['summary']['total_events']}
272
+ - Total trash items: {result['summary']['total_trash_items']}
273
+ - Average per event: {result['summary']['avg_trash_per_event']:.1f}
274
+ - Unique locations: {result['summary']['unique_locations']}
275
+
276
+ ---
277
+
278
+ **Recent Events:**
279
+
280
+ """
281
+ for event in result["events"][:20]: # Show last 20
282
+ output += f"""
283
+ **Event #{event['id']}** - {event['timestamp'][:19]}
284
+ - Location: {event['location'] or 'Not specified'}
285
+ - Severity: {event['severity'].upper()}
286
+ - Items: {event['trash_count']}
287
+ - Categories: {', '.join(event['categories'])}
288
+ - Status: {'✅ Cleaned' if event['cleaned'] else '⏳ Pending'}
289
+ ---
290
+ """
291
+
292
+ return output
293
+
294
+ except Exception as e:
295
+ return f"❌ Error loading history: {str(e)}"
296
+
297
+
298
+ def load_hotspots(days: int) -> str:
299
+ """Load and format hotspot analysis."""
300
+ try:
301
+ result = analyze_hotspots(days=days)
302
+
303
+ if not result["hotspots"]:
304
+ return result.get("message", "No hotspots found.")
305
+
306
+ output = f"""### 🔥 Trash Hotspots Analysis
307
+
308
+ {result['recommendation']}
309
+
310
+ ---
311
+
312
+ **All Hotspots ({result['count']} locations):**
313
+
314
+ """
315
+ for i, hotspot in enumerate(result["hotspots"], 1):
316
+ output += f"""
317
+ **{i}. {hotspot['location']}**
318
+ - Events: {hotspot['event_count']}
319
+ - Total trash items: {hotspot['total_trash']}
320
+ - Average per event: {hotspot['avg_trash']:.1f}
321
+ - Last seen: {hotspot['last_event'][:19]}
322
+ - Severity levels: {hotspot['severities']}
323
+ ---
324
+ """
325
+
326
+ return output
327
+
328
+ except Exception as e:
329
+ return f"❌ Error analyzing hotspots: {str(e)}"
330
+
331
+
332
+ # ============================================================================
333
+ # CHATBOT FUNCTION
334
+ # ============================================================================
335
+
336
+ def chat_with_agent(message: str, history: list) -> str:
337
+ """Handle chat interactions with the CleanCity agent."""
338
+ try:
339
+ llm = get_llm_client()
340
+
341
+ # Build context from history
342
+ context = ""
343
+ for user_msg, bot_msg in history:
344
+ context += f"User: {user_msg}\nAgent: {bot_msg}\n"
345
+
346
+ # System prompt
347
+ system_prompt = """You are CleanCity Agent, a helpful AI assistant focused on
348
+ environmental cleanup and trash management. You help users:
349
+ - Understand their trash detection results
350
+ - Plan effective cleanup operations
351
+ - Organize community action
352
+ - Report issues to authorities
353
+ - Analyze environmental impact
354
+
355
+ Be friendly, practical, and encouraging. Keep responses concise but informative."""
356
+
357
+ prompt = f"{context}User: {message}\nAgent:"
358
+
359
+ response = llm.generate_text(
360
+ prompt,
361
+ system_prompt=system_prompt,
362
+ max_tokens=300,
363
+ temperature=0.7
364
+ )
365
+
366
+ return response
367
+
368
+ except Exception as e:
369
+ return f"I encountered an error: {str(e)}. Please try again or check your LLM configuration."
370
+
371
+
372
+ # ============================================================================
373
+ # GRADIO INTERFACE
374
+ # ============================================================================
375
+
376
+ def create_interface() -> gr.Blocks:
377
+ """Create and configure the Gradio interface."""
378
+
379
+ with gr.Blocks(
380
+ title="CleanCity Agent",
381
+ theme=gr.themes.Soft(primary_hue="green")
382
+ ) as app:
383
+ # Header
384
+ gr.Markdown(f"# {TITLE}")
385
+ gr.Markdown(f"*{TAGLINE}*")
386
+
387
+ with gr.Tabs():
388
+ # ================================================================
389
+ # TAB 1: MAIN ANALYSIS
390
+ # ================================================================
391
+ with gr.Tab("🔍 Analyze Image"):
392
+ with gr.Row():
393
+ with gr.Column(scale=1):
394
+ gr.Markdown("### Upload Image")
395
+ image_input = gr.Image(
396
+ type="pil",
397
+ label="Street/Beach/Park Image",
398
+ sources=["upload", "webcam"]
399
+ )
400
+
401
+ with gr.Row():
402
+ location_input = gr.Textbox(
403
+ label="Location (optional)",
404
+ placeholder="e.g., Main Street Park, Downtown Beach...",
405
+ lines=1,
406
+ scale=4
407
+ )
408
+ get_location_btn = gr.Button(
409
+ "📍 Get GPS",
410
+ size="sm",
411
+ scale=1
412
+ )
413
+
414
+ gps_coords = gr.Textbox(
415
+ label="GPS Coordinates",
416
+ placeholder="Latitude, Longitude (auto-filled when you click Get GPS)",
417
+ lines=1,
418
+ interactive=False,
419
+ visible=False
420
+ )
421
+
422
+ notes_input = gr.Textbox(
423
+ label="Notes (optional)",
424
+ placeholder="Any additional context...",
425
+ lines=2
426
+ )
427
+
428
+ save_history = gr.Checkbox(
429
+ label="Save to history",
430
+ value=True
431
+ )
432
+
433
+ analyze_btn = gr.Button(
434
+ "🚀 Start Analysis",
435
+ variant="primary",
436
+ size="lg"
437
+ )
438
+
439
+ with gr.Column(scale=1):
440
+ gr.Markdown("### Detection Results")
441
+ output_image = gr.Image(
442
+ type="pil",
443
+ label="Annotated Image"
444
+ )
445
+
446
+ gr.Markdown("---")
447
+
448
+ with gr.Row():
449
+ with gr.Column():
450
+ detection_output = gr.Markdown(label="Detections")
451
+
452
+ with gr.Column():
453
+ plan_output = gr.Markdown(label="Cleanup Plan")
454
+
455
+ gr.Markdown("### 📄 Generated Report")
456
+ report_output = gr.Textbox(
457
+ label="Email Report (copy & send to authorities)",
458
+ lines=15,
459
+ max_lines=20
460
+ )
461
+
462
+ # Wire up the analyze button
463
+ analyze_btn.click(
464
+ fn=analyze_image,
465
+ inputs=[image_input, location_input, notes_input, save_history, gps_coords],
466
+ outputs=[output_image, detection_output, plan_output, report_output]
467
+ )
468
+
469
+ # Wire up GPS button with JavaScript to get browser location
470
+ get_location_btn.click(
471
+ fn=None,
472
+ inputs=[],
473
+ outputs=[location_input, gps_coords],
474
+ js="""
475
+ async () => {
476
+ try {
477
+ const position = await new Promise((resolve, reject) => {
478
+ navigator.geolocation.getCurrentPosition(resolve, reject, {
479
+ enableHighAccuracy: true,
480
+ timeout: 10000
481
+ });
482
+ });
483
+
484
+ const lat = position.coords.latitude.toFixed(6);
485
+ const lon = position.coords.longitude.toFixed(6);
486
+ const coords = lat + ', ' + lon;
487
+
488
+ // Reverse geocode to get location name
489
+ try {
490
+ const response = await fetch(
491
+ `https://nominatim.openstreetmap.org/reverse?format=json&lat=${lat}&lon=${lon}`
492
+ );
493
+ const data = await response.json();
494
+ const location = data.display_name || `Location at ${coords}`;
495
+ return [location, coords];
496
+ } catch (e) {
497
+ return [`Location at ${coords}`, coords];
498
+ }
499
+ } catch (error) {
500
+ alert('GPS Error: ' + error.message + '\\n\\nPlease enable location services in your browser.');
501
+ return ['', ''];
502
+ }
503
+ }
504
+ """
505
+ )
506
+
507
+ # ================================================================
508
+ # TAB 2: USER GUIDE
509
+ # ================================================================
510
+ with gr.Tab("📖 How It Works"):
511
+ gr.Markdown(GUIDE_CONTENT)
512
+ gr.Markdown("---")
513
+ gr.Markdown(FAQ_CONTENT)
514
+
515
+ # ================================================================
516
+ # TAB 3: HISTORY
517
+ # ================================================================
518
+ with gr.Tab("📊 Event History"):
519
+ gr.Markdown("### View Past Trash Detection Events")
520
+
521
+ with gr.Row():
522
+ days_filter = gr.Slider(
523
+ minimum=0,
524
+ maximum=365,
525
+ value=30,
526
+ step=1,
527
+ label="Last N days (0 = all time)"
528
+ )
529
+ location_filter = gr.Textbox(
530
+ label="Filter by location (partial match)",
531
+ placeholder="e.g., Park"
532
+ )
533
+ severity_filter = gr.Dropdown(
534
+ choices=["All", "Low", "Medium", "High"],
535
+ value="All",
536
+ label="Filter by severity"
537
+ )
538
+
539
+ load_history_btn = gr.Button("🔄 Load History", variant="primary")
540
+ history_output = gr.Markdown()
541
+
542
+ load_history_btn.click(
543
+ fn=load_history,
544
+ inputs=[days_filter, location_filter, severity_filter],
545
+ outputs=history_output
546
+ )
547
+
548
+ # ================================================================
549
+ # TAB 4: HOTSPOTS
550
+ # ================================================================
551
+ with gr.Tab("🔥 Hotspot Analysis"):
552
+ gr.Markdown("### Identify Recurring Problem Areas")
553
+ gr.Markdown(
554
+ "Hotspots are locations with multiple trash events. "
555
+ "Use this to prioritize cleanup efforts and request permanent solutions."
556
+ )
557
+
558
+ hotspot_days = gr.Slider(
559
+ minimum=7,
560
+ maximum=365,
561
+ value=30,
562
+ step=7,
563
+ label="Analyze last N days"
564
+ )
565
+
566
+ load_hotspots_btn = gr.Button("🔍 Find Hotspots", variant="primary")
567
+ hotspots_output = gr.Markdown()
568
+
569
+ load_hotspots_btn.click(
570
+ fn=load_hotspots,
571
+ inputs=hotspot_days,
572
+ outputs=hotspots_output
573
+ )
574
+
575
+ # ================================================================
576
+ # TAB 5: CHAT WITH AGENT
577
+ # ================================================================
578
+ with gr.Tab("💬 Chat with Agent"):
579
+ gr.Markdown("### Ask Questions or Get Help")
580
+ gr.Markdown(
581
+ "Chat with the CleanCity Agent to get advice on cleanup strategies, "
582
+ "interpretation of results, or general environmental questions."
583
+ )
584
+
585
+ chatbot = gr.Chatbot(height=500)
586
+ msg = gr.Textbox(
587
+ label="Your message",
588
+ placeholder="Ask me anything about cleanup planning..."
589
+ )
590
+
591
+ with gr.Row():
592
+ submit = gr.Button("Send", variant="primary")
593
+ clear = gr.Button("Clear Chat")
594
+
595
+ def respond(message, chat_history):
596
+ bot_response = chat_with_agent(message, chat_history)
597
+ chat_history.append((message, bot_response))
598
+ return "", chat_history
599
+
600
+ submit.click(respond, [msg, chatbot], [msg, chatbot])
601
+ msg.submit(respond, [msg, chatbot], [msg, chatbot])
602
+ clear.click(lambda: [], None, chatbot)
603
+
604
+ # Footer
605
+ gr.Markdown("---")
606
+ gr.Markdown(
607
+ "*CleanCity Agent is a prototype tool for community environmental action. "
608
+ "Always verify AI results manually before taking action.*"
609
+ )
610
+
611
+ return app
612
+
613
+
614
+ # ============================================================================
615
+ # MAIN ENTRY POINT
616
+ # ============================================================================
617
+
618
+ # Create the Gradio app at module level for HuggingFace Spaces compatibility
619
+ print("=" * 60)
620
+ print("🌍 CleanCity Agent - Initializing...")
621
+ print("=" * 60)
622
+
623
+ # Initialize LLM client (will print status)
624
+ get_llm_client()
625
+
626
+ print("\n✓ Creating Gradio interface...")
627
+ app = create_interface()
628
+ print("✓ Gradio interface ready!")
629
+ print("=" * 60 + "\n")
630
+
631
+
632
+ def main():
633
+ """Launch the Gradio application (local development)."""
634
+ print("🚀 Launching web server...")
635
+ print("Access the app at: http://localhost:7860\n")
636
+
637
+ app.launch(
638
+ server_name="0.0.0.0", # Allow external connections
639
+ server_port=7860,
640
+ share=False, # Set to True to create public link
641
+ show_error=True
642
+ )
643
+
644
+
645
+ if __name__ == "__main__":
646
+ main()
llm_client.py CHANGED
@@ -1,368 +1,214 @@
1
- """
2
- CleanCity Agent - LLM Client
3
-
4
- Modern multi-provider LLM client supporting latest APIs:
5
- - Anthropic Claude (3.5 Sonnet, 3 Opus)
6
- - OpenAI (GPT-4o, GPT-4 Turbo)
7
- - Google Gemini (1.5 Pro, Flash)
8
-
9
- Features:
10
- - Automatic provider selection from environment
11
- - Graceful fallback to offline mode
12
- - Unified API across providers
13
- - Type hints and error handling
14
- - Streaming support (optional)
15
- """
16
-
17
- import os
18
- from typing import Optional, Literal
19
- from dotenv import load_dotenv
20
- import logging
21
-
22
- # Load environment variables
23
- load_dotenv()
24
-
25
- # Configure logging
26
- logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
27
- logger = logging.getLogger(__name__)
28
-
29
- # Type definitions
30
- LLMProvider = Literal["anthropic", "openai", "gemini", "offline"]
31
-
32
-
33
- class LLMClient:
34
- """
35
- Unified LLM client with multi-provider support.
36
-
37
- Automatically initializes the configured provider and falls back
38
- to offline mode if API keys are missing or packages unavailable.
39
- """
40
-
41
- def __init__(self, provider: Optional[str] = None):
42
- """
43
- Initialize LLM client.
44
-
45
- Args:
46
- provider: Override environment provider setting
47
- """
48
- self.provider: LLMProvider = (provider or os.getenv("LLM_PROVIDER", "offline")).lower()
49
- self._client = None
50
- self._initialize()
51
-
52
- def _initialize(self) -> None:
53
- """Initialize the selected LLM provider."""
54
- if self.provider == "anthropic":
55
- self._init_anthropic()
56
- elif self.provider == "openai":
57
- self._init_openai()
58
- elif self.provider == "gemini":
59
- self._init_gemini()
60
- else:
61
- self._init_offline()
62
-
63
- def _init_anthropic(self) -> None:
64
- """Initialize Anthropic Claude client."""
65
- api_key = os.getenv("ANTHROPIC_API_KEY")
66
-
67
- if not api_key:
68
- logger.warning("⚠️ ANTHROPIC_API_KEY not set, falling back to offline mode")
69
- self.provider = "offline"
70
- return
71
-
72
- try:
73
- import anthropic
74
- self._client = anthropic.Anthropic(api_key=api_key)
75
- self.model = os.getenv("ANTHROPIC_MODEL", "claude-3-5-sonnet-20241022")
76
- logger.info(f"✅ Anthropic Claude initialized (model: {self.model})")
77
- except ImportError:
78
- logger.warning("⚠️ Anthropic package not installed, falling back to offline mode")
79
- self.provider = "offline"
80
- except Exception as e:
81
- logger.error(f"❌ Anthropic initialization failed: {e}")
82
- self.provider = "offline"
83
-
84
- def _init_openai(self) -> None:
85
- """Initialize OpenAI client."""
86
- api_key = os.getenv("OPENAI_API_KEY")
87
-
88
- if not api_key:
89
- logger.warning("⚠️ OPENAI_API_KEY not set, falling back to offline mode")
90
- self.provider = "offline"
91
- return
92
-
93
- try:
94
- import openai
95
- self._client = openai.OpenAI(api_key=api_key)
96
- self.model = os.getenv("OPENAI_MODEL", "gpt-4o")
97
- logger.info(f"✅ OpenAI initialized (model: {self.model})")
98
- except ImportError:
99
- logger.warning("⚠️ OpenAI package not installed, falling back to offline mode")
100
- self.provider = "offline"
101
- except Exception as e:
102
- logger.error(f"❌ OpenAI initialization failed: {e}")
103
- self.provider = "offline"
104
-
105
- def _init_gemini(self) -> None:
106
- """Initialize Google Gemini client."""
107
- api_key = os.getenv("GEMINI_API_KEY")
108
-
109
- if not api_key:
110
- logger.warning("⚠️ GEMINI_API_KEY not set, falling back to offline mode")
111
- self.provider = "offline"
112
- return
113
-
114
- try:
115
- import google.generativeai as genai
116
- genai.configure(api_key=api_key)
117
- self.model = os.getenv("GEMINI_MODEL", "gemini-1.5-pro")
118
- self._client = genai.GenerativeModel(self.model)
119
- logger.info(f"✅ Google Gemini initialized (model: {self.model})")
120
- except ImportError:
121
- logger.warning("⚠️ Google GenAI package not installed, falling back to offline mode")
122
- self.provider = "offline"
123
- except Exception as e:
124
- logger.error(f"❌ Gemini initialization failed: {e}")
125
- self.provider = "offline"
126
-
127
- def _init_offline(self) -> None:
128
- """Initialize offline mode (mock responses)."""
129
- logger.info("ℹ️ Running in offline mode (mock LLM responses)")
130
- self.provider = "offline"
131
-
132
- def generate(
133
- self,
134
- prompt: str,
135
- system_prompt: Optional[str] = None,
136
- max_tokens: int = 2048,
137
- temperature: float = 0.7
138
- ) -> str:
139
- """
140
- Generate text completion.
141
-
142
- Args:
143
- prompt: User prompt/query
144
- system_prompt: Optional system instructions
145
- max_tokens: Maximum tokens to generate
146
- temperature: Sampling temperature (0.0-1.0)
147
-
148
- Returns:
149
- Generated text response
150
- """
151
- try:
152
- if self.provider == "anthropic":
153
- return self._generate_anthropic(prompt, system_prompt, max_tokens, temperature)
154
- elif self.provider == "openai":
155
- return self._generate_openai(prompt, system_prompt, max_tokens, temperature)
156
- elif self.provider == "gemini":
157
- return self._generate_gemini(prompt, system_prompt, max_tokens, temperature)
158
- else:
159
- return self._generate_offline(prompt)
160
- except Exception as e:
161
- logger.error(f"⚠️ LLM generation failed: {e}, using offline fallback")
162
- return self._generate_offline(prompt)
163
-
164
- def _generate_anthropic(
165
- self,
166
- prompt: str,
167
- system_prompt: Optional[str],
168
- max_tokens: int,
169
- temperature: float
170
- ) -> str:
171
- """Generate using Anthropic Claude."""
172
- messages = [{"role": "user", "content": prompt}]
173
-
174
- kwargs = {
175
- "model": self.model,
176
- "max_tokens": max_tokens,
177
- "temperature": temperature,
178
- "messages": messages
179
- }
180
-
181
- if system_prompt:
182
- kwargs["system"] = system_prompt
183
-
184
- response = self._client.messages.create(**kwargs)
185
- return response.content[0].text
186
-
187
- def _generate_openai(
188
- self,
189
- prompt: str,
190
- system_prompt: Optional[str],
191
- max_tokens: int,
192
- temperature: float
193
- ) -> str:
194
- """Generate using OpenAI GPT."""
195
- messages = []
196
- if system_prompt:
197
- messages.append({"role": "system", "content": system_prompt})
198
- messages.append({"role": "user", "content": prompt})
199
-
200
- response = self._client.chat.completions.create(
201
- model=self.model,
202
- messages=messages,
203
- max_tokens=max_tokens,
204
- temperature=temperature
205
- )
206
- return response.choices[0].message.content
207
-
208
- def _generate_gemini(
209
- self,
210
- prompt: str,
211
- system_prompt: Optional[str],
212
- max_tokens: int,
213
- temperature: float
214
- ) -> str:
215
- """Generate using Google Gemini."""
216
- full_prompt = prompt
217
- if system_prompt:
218
- full_prompt = f"{system_prompt}\n\n{prompt}"
219
-
220
- generation_config = {
221
- "max_output_tokens": max_tokens,
222
- "temperature": temperature,
223
- }
224
-
225
- response = self._client.generate_content(
226
- full_prompt,
227
- generation_config=generation_config
228
- )
229
- return response.text
230
-
231
- def _generate_offline(self, prompt: str) -> str:
232
- """Generate mock response in offline mode."""
233
- # Simple keyword-based mock responses
234
- prompt_lower = prompt.lower()
235
-
236
- if "cleanup" in prompt_lower or "plan" in prompt_lower:
237
- return """Based on the detected trash, here's a recommended cleanup plan:
238
-
239
- **Severity**: Medium
240
- **Volunteers Needed**: 3-5 people
241
- **Estimated Time**: 1-2 hours
242
- **Equipment**:
243
- - Trash bags (heavy duty)
244
- - Gloves
245
- - Grabber tools
246
- - Safety vests
247
-
248
- **Environmental Impact**: Removing this trash will help prevent pollution and protect local wildlife.
249
-
250
- **Recommendations**:
251
- - Coordinate with local environmental groups
252
- - Consider installing waste bins in this area
253
- - Plan regular cleanup events"""
254
-
255
- elif "report" in prompt_lower:
256
- return """# Cleanup Report
257
-
258
- **Location**: [Location from data]
259
- **Date**: [Current date]
260
- **Trash Detected**: [Count] items
261
-
262
- This area requires attention to prevent environmental degradation. Recommend scheduling a community cleanup event and installing proper waste disposal infrastructure.
263
-
264
- **Next Steps**:
265
- - Notify local authorities
266
- - Organize volunteer cleanup
267
- - Monitor area for recurring issues"""
268
-
269
- elif "chat" in prompt_lower or "question" in prompt_lower:
270
- return """I'm here to help with trash cleanup planning! I can:
271
- - Analyze detected trash and recommend cleanup strategies
272
- - Estimate resources needed for cleanup
273
- - Generate reports for authorities
274
- - Provide environmental impact assessments
275
- - Suggest prevention measures
276
-
277
- What would you like to know?"""
278
-
279
- else:
280
- return "I'm running in offline mode. For full AI capabilities, configure an API key in the .env file."
281
-
282
- def is_online(self) -> bool:
283
- """Check if using a real LLM provider (not offline)."""
284
- return self.provider != "offline"
285
-
286
- def get_info(self) -> dict:
287
- """Get client information."""
288
- return {
289
- "provider": self.provider,
290
- "online": self.is_online(),
291
- "model": getattr(self, "model", None)
292
- }
293
-
294
-
295
- # ============================================================================
296
- # Global Instance & Public API
297
- # ============================================================================
298
-
299
- _global_client: Optional[LLMClient] = None
300
-
301
-
302
- def get_llm_client(provider: Optional[str] = None) -> LLMClient:
303
- """
304
- Get or create the global LLM client instance.
305
-
306
- Args:
307
- provider: Override default provider
308
-
309
- Returns:
310
- LLMClient instance
311
- """
312
- global _global_client
313
- if _global_client is None or provider is not None:
314
- _global_client = LLMClient(provider)
315
- return _global_client
316
-
317
-
318
- def generate_text(
319
- prompt: str,
320
- system_prompt: Optional[str] = None,
321
- **kwargs
322
- ) -> str:
323
- """
324
- Generate text using the global LLM client.
325
-
326
- Args:
327
- prompt: User prompt
328
- system_prompt: System instructions
329
- **kwargs: Additional generation parameters
330
-
331
- Returns:
332
- Generated text
333
- """
334
- client = get_llm_client()
335
- return client.generate(prompt, system_prompt, **kwargs)
336
-
337
-
338
- # ============================================================================
339
- # Module Test
340
- # ============================================================================
341
-
342
- if __name__ == "__main__":
343
- """Test the LLM client module."""
344
- print("=" * 70)
345
- print("Testing CleanCity LLM Client")
346
- print("=" * 70)
347
-
348
- # Test initialization
349
- print("\n1. Initializing LLM client...")
350
- client = get_llm_client()
351
-
352
- info = client.get_info()
353
- print(f" Provider: {info['provider']}")
354
- print(f" Online: {info['online']}")
355
- print(f" Model: {info['model']}")
356
-
357
- # Test text generation
358
- print("\n2. Testing text generation...")
359
- test_prompt = "Generate a brief cleanup plan for a beach with plastic bottles."
360
-
361
- print(f" Prompt: {test_prompt}")
362
- print("\n Response:")
363
- response = client.generate(test_prompt, max_tokens=300)
364
- print(f" {response[:200]}...")
365
-
366
- print("\n" + "=" * 70)
367
- print("✅ LLM Client test complete!")
368
- print("=" * 70)
 
1
+ """
2
+ LLM Client Abstraction Layer
3
+
4
+ Provides unified interface to multiple LLM providers (Anthropic, OpenAI, Google Gemini).
5
+ Includes offline fallback mode for demos without API keys.
6
+
7
+ Configuration via environment variables:
8
+ - LLM_PROVIDER: "anthropic" | "openai" | "gemini" | "offline"
9
+ - ANTHROPIC_API_KEY
10
+ - OPENAI_API_KEY
11
+ - GEMINI_API_KEY
12
+ """
13
+
14
+ import os
15
+ from typing import Optional
16
+ from dotenv import load_dotenv
17
+
18
+ load_dotenv()
19
+
20
+
21
+ class LLMClient:
22
+ """Unified LLM client with multi-provider support and offline mode."""
23
+
24
+ def __init__(self):
25
+ self.provider = os.getenv("LLM_PROVIDER", "offline").lower()
26
+ self._client = None
27
+ self._initialize_client()
28
+
29
+ def _initialize_client(self):
30
+ """Initialize the appropriate LLM client based on provider setting."""
31
+ if self.provider == "anthropic":
32
+ api_key = os.getenv("ANTHROPIC_API_KEY")
33
+ if api_key:
34
+ try:
35
+ import anthropic
36
+ self._client = anthropic.Anthropic(api_key=api_key)
37
+ print("✓ Anthropic client initialized")
38
+ except ImportError:
39
+ print("⚠ Anthropic package not installed, falling back to offline mode")
40
+ self.provider = "offline"
41
+ else:
42
+ print("⚠ ANTHROPIC_API_KEY not set, using offline mode")
43
+ self.provider = "offline"
44
+
45
+ elif self.provider == "openai":
46
+ api_key = os.getenv("OPENAI_API_KEY")
47
+ if api_key:
48
+ try:
49
+ import openai
50
+ self._client = openai.OpenAI(api_key=api_key)
51
+ print("✓ OpenAI client initialized")
52
+ except ImportError:
53
+ print(" OpenAI package not installed, falling back to offline mode")
54
+ self.provider = "offline"
55
+ else:
56
+ print("⚠ OPENAI_API_KEY not set, using offline mode")
57
+ self.provider = "offline"
58
+
59
+ elif self.provider == "gemini":
60
+ api_key = os.getenv("GEMINI_API_KEY")
61
+ if api_key:
62
+ try:
63
+ import google.generativeai as genai
64
+ genai.configure(api_key=api_key)
65
+ self._client = genai.GenerativeModel('gemini-pro')
66
+ print("✓ Gemini client initialized")
67
+ except ImportError:
68
+ print("⚠ Google GenAI package not installed, falling back to offline mode")
69
+ self.provider = "offline"
70
+ else:
71
+ print("⚠ GEMINI_API_KEY not set, using offline mode")
72
+ self.provider = "offline"
73
+
74
+ else:
75
+ print("ℹ Running in offline mode (mock responses)")
76
+ self.provider = "offline"
77
+
78
+ def generate_text(
79
+ self,
80
+ prompt: str,
81
+ system_prompt: Optional[str] = None,
82
+ max_tokens: int = 1024,
83
+ temperature: float = 0.7
84
+ ) -> str:
85
+ """
86
+ Generate text completion from the configured LLM.
87
+
88
+ Args:
89
+ prompt: User prompt/query
90
+ system_prompt: Optional system instructions
91
+ max_tokens: Maximum tokens to generate
92
+ temperature: Sampling temperature (0-1)
93
+
94
+ Returns:
95
+ Generated text response
96
+ """
97
+ if self.provider == "offline":
98
+ return self._offline_response(prompt)
99
+
100
+ try:
101
+ if self.provider == "anthropic":
102
+ return self._anthropic_generate(prompt, system_prompt, max_tokens, temperature)
103
+ elif self.provider == "openai":
104
+ return self._openai_generate(prompt, system_prompt, max_tokens, temperature)
105
+ elif self.provider == "gemini":
106
+ return self._gemini_generate(prompt, system_prompt, max_tokens, temperature)
107
+ except Exception as e:
108
+ print(f"⚠ LLM API error: {e}, falling back to offline mode")
109
+ return self._offline_response(prompt)
110
+
111
+ return self._offline_response(prompt)
112
+
113
+ def _anthropic_generate(self, prompt: str, system_prompt: Optional[str], max_tokens: int, temperature: float) -> str:
114
+ """Generate using Anthropic Claude."""
115
+ messages = [{"role": "user", "content": prompt}]
116
+
117
+ kwargs = {
118
+ "model": "claude-3-5-sonnet-20241022",
119
+ "max_tokens": max_tokens,
120
+ "temperature": temperature,
121
+ "messages": messages
122
+ }
123
+
124
+ if system_prompt:
125
+ kwargs["system"] = system_prompt
126
+
127
+ response = self._client.messages.create(**kwargs)
128
+ return response.content[0].text
129
+
130
+ def _openai_generate(self, prompt: str, system_prompt: Optional[str], max_tokens: int, temperature: float) -> str:
131
+ """Generate using OpenAI GPT."""
132
+ messages = []
133
+ if system_prompt:
134
+ messages.append({"role": "system", "content": system_prompt})
135
+ messages.append({"role": "user", "content": prompt})
136
+
137
+ response = self._client.chat.completions.create(
138
+ model="gpt-4o",
139
+ messages=messages,
140
+ max_tokens=max_tokens,
141
+ temperature=temperature
142
+ )
143
+ return response.choices[0].message.content
144
+
145
+ def _gemini_generate(self, prompt: str, system_prompt: Optional[str], max_tokens: int, temperature: float) -> str:
146
+ """Generate using Google Gemini."""
147
+ full_prompt = prompt
148
+ if system_prompt:
149
+ full_prompt = f"{system_prompt}\n\n{prompt}"
150
+
151
+ response = self._client.generate_content(
152
+ full_prompt,
153
+ generation_config={
154
+ "max_output_tokens": max_tokens,
155
+ "temperature": temperature
156
+ }
157
+ )
158
+ return response.text
159
+
160
+ def _offline_response(self, prompt: str) -> str:
161
+ """Generate mock responses for offline/demo mode."""
162
+ prompt_lower = prompt.lower()
163
+
164
+ # Pattern matching for common requests
165
+ if "cleanup" in prompt_lower or "plan" in prompt_lower:
166
+ return """Based on the detected trash, here's a recommended cleanup plan:
167
+
168
+ **Severity Level**: Medium
169
+
170
+ **Recommended Actions**:
171
+ - Deploy 2-3 volunteers with standard cleanup equipment
172
+ - Estimated time: 45-60 minutes
173
+ - Bring 3-4 heavy-duty trash bags
174
+ - Consider gloves and grabber tools for safety
175
+
176
+ **Environmental Impact**: Moderate littering with potential harm to local wildlife if not addressed.
177
+
178
+ **Urgency**: Should be cleaned within 1-2 days to prevent accumulation."""
179
+
180
+ elif "report" in prompt_lower:
181
+ return """**Trash Cleanup Report**
182
+
183
+ Location: [User-specified location]
184
+ Date: [Current date]
185
+
186
+ We have identified significant litter accumulation requiring attention. The area contains multiple pieces of trash including plastic bottles, food wrappers, and other waste materials.
187
+
188
+ Recommended immediate action by city services to maintain public health and environmental standards.
189
+
190
+ Contact: [Your community group]"""
191
+
192
+ elif "severity" in prompt_lower or "analyze" in prompt_lower:
193
+ return "Based on the number and type of trash items detected, this appears to be a **medium severity** situation requiring attention within 1-2 days."
194
+
195
+ else:
196
+ return """I've analyzed the image and detected several trash items. The situation requires moderate attention with a cleanup effort estimated at 2-3 volunteers for about 45-60 minutes. This will help maintain the cleanliness and environmental health of the area."""
197
+
198
+
199
+ # Global singleton instance
200
+ _llm_client: Optional[LLMClient] = None
201
+
202
+
203
+ def get_llm_client() -> LLMClient:
204
+ """Get or create the global LLM client instance."""
205
+ global _llm_client
206
+ if _llm_client is None:
207
+ _llm_client = LLMClient()
208
+ return _llm_client
209
+
210
+
211
+ def generate_text(prompt: str, system_prompt: Optional[str] = None, **kwargs) -> str:
212
+ """Convenience function for text generation."""
213
+ client = get_llm_client()
214
+ return client.generate_text(prompt, system_prompt, **kwargs)
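+ 
+ 
+ # Minimal smoke test -- a sketch for local runs. Output depends on the provider
+ # configured in .env; in offline mode this prints a canned mock response.
+ if __name__ == "__main__":
+ client = get_llm_client()
+ print(f"Provider: {client.provider}")
+ print(generate_text("Suggest a cleanup plan for a littered beach.", max_tokens=200))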
 
mcp_server.py ADDED
@@ -0,0 +1,275 @@
1
+ """
2
+ MCP Server for CleanCity Agent
3
+
4
+ Exposes trash detection and cleanup planning tools via the Model Context Protocol.
5
+ Can be used by Claude Desktop or other MCP clients.
6
+ """
7
+
8
+ from mcp.server import Server
9
+ from mcp.types import Tool, TextContent
10
+ import mcp.server.stdio
11
+ import json
12
+ from typing import Any
13
+
14
+ from tools import (
15
+ detect_trash_mcp,
16
+ plan_cleanup,
17
+ log_event,
18
+ query_events,
19
+ generate_report
20
+ )
21
+ from tools.history_tool import get_hotspots, mark_cleaned
22
+
23
+
24
+ # Initialize MCP server
25
+ server = Server("cleancity-agent")
26
+
27
+
28
+ @server.list_tools()
29
+ async def list_tools() -> list[Tool]:
30
+ """List available MCP tools."""
31
+ return [
32
+ Tool(
33
+ name="detect_trash",
34
+ description="Detect trash objects in an image using computer vision. Returns bounding boxes, labels, and confidence scores.",
35
+ inputSchema={
36
+ "type": "object",
37
+ "properties": {
38
+ "image_data": {
39
+ "type": "string",
40
+ "description": "Base64 encoded image data"
41
+ }
42
+ },
43
+ "required": ["image_data"]
44
+ }
45
+ ),
46
+ Tool(
47
+ name="plan_cleanup",
48
+ description="Generate a cleanup action plan based on detected trash. Returns severity level, resource requirements, and recommendations.",
49
+ inputSchema={
50
+ "type": "object",
51
+ "properties": {
52
+ "detections": {
53
+ "type": "array",
54
+ "description": "Array of trash detection objects from detect_trash",
55
+ "items": {"type": "object"}
56
+ },
57
+ "location": {
58
+ "type": "string",
59
+ "description": "Location description (optional)"
60
+ },
61
+ "notes": {
62
+ "type": "string",
63
+ "description": "Additional context or notes (optional)"
64
+ },
65
+ "use_llm": {
66
+ "type": "boolean",
67
+ "description": "Use LLM for enhanced planning (optional, default: false)"
68
+ }
69
+ },
70
+ "required": ["detections"]
71
+ }
72
+ ),
73
+ Tool(
74
+ name="log_event",
75
+ description="Log a trash detection event to the history database for tracking and analysis.",
76
+ inputSchema={
77
+ "type": "object",
78
+ "properties": {
79
+ "detections": {
80
+ "type": "array",
81
+ "description": "Array of trash detection objects",
82
+ "items": {"type": "object"}
83
+ },
84
+ "severity": {
85
+ "type": "string",
86
+ "description": "Severity level: low, medium, or high"
87
+ },
88
+ "location": {
89
+ "type": "string",
90
+ "description": "Location description (optional)"
91
+ },
92
+ "notes": {
93
+ "type": "string",
94
+ "description": "User notes (optional)"
95
+ }
96
+ },
97
+ "required": ["detections", "severity"]
98
+ }
99
+ ),
100
+ Tool(
101
+ name="query_events",
102
+ description="Query trash events from history database with filtering options. Useful for finding patterns and hotspots.",
103
+ inputSchema={
104
+ "type": "object",
105
+ "properties": {
106
+ "days": {
107
+ "type": "integer",
108
+ "description": "Only events from last N days (optional)"
109
+ },
110
+ "location": {
111
+ "type": "string",
112
+ "description": "Filter by location (partial match, optional)"
113
+ },
114
+ "severity": {
115
+ "type": "string",
116
+ "description": "Filter by severity: low, medium, high (optional)"
117
+ },
118
+ "limit": {
119
+ "type": "integer",
120
+ "description": "Maximum results to return (default: 100)"
121
+ }
122
+ }
123
+ }
124
+ ),
125
+ Tool(
126
+ name="get_hotspots",
127
+ description="Identify locations with recurring trash problems based on historical data.",
128
+ inputSchema={
129
+ "type": "object",
130
+ "properties": {
131
+ "min_events": {
132
+ "type": "integer",
133
+ "description": "Minimum events to qualify as hotspot (default: 2)"
134
+ },
135
+ "days": {
136
+ "type": "integer",
137
+ "description": "Time window in days (optional, default: 30)"
138
+ }
139
+ }
140
+ }
141
+ ),
142
+ Tool(
143
+ name="generate_report",
144
+ description="Generate a formatted report for trash detection event, suitable for city authorities or documentation.",
145
+ inputSchema={
146
+ "type": "object",
147
+ "properties": {
148
+ "detections": {
149
+ "type": "array",
150
+ "description": "Array of trash detection objects",
151
+ "items": {"type": "object"}
152
+ },
153
+ "severity": {
154
+ "type": "string",
155
+ "description": "Severity level"
156
+ },
157
+ "location": {
158
+ "type": "string",
159
+ "description": "Location description (optional)"
160
+ },
161
+ "notes": {
162
+ "type": "string",
163
+ "description": "Additional notes (optional)"
164
+ },
165
+ "plan": {
166
+ "type": "object",
167
+ "description": "Cleanup plan object (optional)"
168
+ },
169
+ "format": {
170
+ "type": "string",
171
+ "description": "Report format: email, markdown, or plain (default: email)"
172
+ }
173
+ },
174
+ "required": ["detections", "severity"]
175
+ }
176
+ ),
177
+ Tool(
178
+ name="mark_cleaned",
179
+ description="Mark a logged event as cleaned/resolved.",
180
+ inputSchema={
181
+ "type": "object",
182
+ "properties": {
183
+ "event_id": {
184
+ "type": "integer",
185
+ "description": "Database event ID to mark as cleaned"
186
+ }
187
+ },
188
+ "required": ["event_id"]
189
+ }
190
+ )
191
+ ]
192
+
193
+
194
+ @server.call_tool()
195
+ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
196
+ """Handle tool execution."""
197
+ try:
198
+ if name == "detect_trash":
199
+ result = detect_trash_mcp(arguments["image_data"])
200
+
201
+ elif name == "plan_cleanup":
202
+ result = plan_cleanup(
203
+ detections=arguments["detections"],
204
+ location=arguments.get("location"),
205
+ notes=arguments.get("notes"),
206
+ use_llm=arguments.get("use_llm", False)
207
+ )
208
+
209
+ elif name == "log_event":
210
+ result = log_event(
211
+ detections=arguments["detections"],
212
+ severity=arguments["severity"],
213
+ location=arguments.get("location"),
214
+ notes=arguments.get("notes")
215
+ )
216
+
217
+ elif name == "query_events":
218
+ result = query_events(
219
+ days=arguments.get("days"),
220
+ location=arguments.get("location"),
221
+ severity=arguments.get("severity"),
222
+ limit=arguments.get("limit", 100)
223
+ )
224
+
225
+ elif name == "get_hotspots":
226
+ result = get_hotspots(
227
+ min_events=arguments.get("min_events", 2),
228
+ days=arguments.get("days", 30)
229
+ )
230
+
231
+ elif name == "generate_report":
232
+ result = generate_report(
233
+ detections=arguments["detections"],
234
+ severity=arguments["severity"],
235
+ location=arguments.get("location"),
236
+ notes=arguments.get("notes"),
237
+ plan=arguments.get("plan"),
238
+ format=arguments.get("format", "email")
239
+ )
240
+
241
+ elif name == "mark_cleaned":
242
+ result = mark_cleaned(arguments["event_id"])
243
+
244
+ else:
245
+ return [TextContent(
246
+ type="text",
247
+ text=f"Unknown tool: {name}"
248
+ )]
249
+
250
+ # Return result as JSON
251
+ return [TextContent(
252
+ type="text",
253
+ text=json.dumps(result, indent=2)
254
+ )]
255
+
256
+ except Exception as e:
257
+ return [TextContent(
258
+ type="text",
259
+ text=f"Error executing {name}: {str(e)}"
260
+ )]
261
+
262
+
263
+ async def main():
264
+ """Run the MCP server."""
265
+ async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
266
+ await server.run(
267
+ read_stream,
268
+ write_stream,
269
+ server.create_initialization_options()
270
+ )
271
+
272
+
273
+ if __name__ == "__main__":
274
+ import asyncio
275
+ asyncio.run(main())
requirements.txt CHANGED
@@ -1,49 +1,32 @@
1
- # CleanCity Agent - Modern Dependencies
2
- # Python 3.11+ required
3
- # Last updated: November 2025
4
-
5
- # ============================================================================
6
- # Core Web Framework
7
- # ============================================================================
8
- gradio==5.9.1 # Latest Gradio for modern web UI
9
-
10
- # ============================================================================
11
- # Computer Vision & AI
12
- # ============================================================================
13
- ultralytics>=8.3.0 # YOLOv8/v11 for trash detection
14
- torch>=2.0.0 # PyTorch (YOLO dependency)
15
- torchvision>=0.15.0 # torchvision
16
- opencv-python>=4.8.0 # OpenCV for image processing
17
-
18
- # ============================================================================
19
- # Image Processing
20
- # ============================================================================
21
- pillow>=10.0.0 # PIL for image handling
22
- numpy>=1.24.0 # NumPy
23
- matplotlib>=3.7.0 # Matplotlib for visualizations
24
-
25
- # ============================================================================
26
- # LLM APIs (Latest SDKs)
27
- # ============================================================================
28
- anthropic>=0.25.0 # Anthropic Claude SDK
29
- openai>=1.0.0 # OpenAI SDK
30
- google-generativeai>=0.3.0 # Google Gemini SDK
31
-
32
- # ============================================================================
33
- # MCP (Model Context Protocol)
34
- # ============================================================================
35
- mcp>=1.1.2 # Latest MCP for Claude Desktop integration
36
-
37
- # ============================================================================
38
- # Utilities
39
- # ============================================================================
40
- python-dotenv>=1.0.0 # Environment variable management
41
- python-dateutil>=2.8.0 # Date/time utilities
42
- httpx>=0.24.0 # Modern async HTTP client
43
-
44
- # ============================================================================
45
- # Development (Optional)
46
- # ============================================================================
47
- # pytest>=8.3.3 # For testing
48
- # black>=24.10.0 # Code formatting
49
- # ruff>=0.8.0 # Fast Python linter
 
1
+ # CleanCity Agent - Production Dependencies
2
+ # Python 3.11+ required
3
+
4
+ # Core UI Framework
5
+ gradio==5.9.1
6
+
7
+ # MCP (Model Context Protocol) - using compatible version without fastmcp
8
+ mcp==1.1.2
9
+
10
+ # Image Processing
11
+ pillow>=10.4.0
12
+
13
+ # LLM Clients
14
+ anthropic>=0.39.0
15
+ openai>=1.54.0
16
+ google-generativeai>=0.8.0
17
+
18
+ # Environment & Configuration
19
+ python-dotenv>=1.0.0
20
+
21
+ # Data Visualization (for overlays)
22
+ matplotlib>=3.9.0
23
+ numpy>=2.1.0
24
+
25
+ # HTTP client for potential external APIs
26
+ httpx>=0.28.0
27
+
28
+ # Computer Vision - YOLO for trash detection
29
+ ultralytics>=8.0.0
30
+
31
+ # Date/time utilities (SQLite itself is stdlib; dateutil adds robust datetime parsing)
32
+ python-dateutil>=2.9.0
 
tools/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ """Tools package for CleanCity Agent MCP server."""
2
+
3
+ from .trash_detection_tool import detect_trash_mcp
4
+ from .cleanup_planner_tool import plan_cleanup
5
+ from .history_tool import log_event, query_events
6
+ from .report_generator_tool import generate_report
7
+
8
+ __all__ = [
9
+ "detect_trash_mcp",
10
+ "plan_cleanup",
11
+ "log_event",
12
+ "query_events",
13
+ "generate_report"
14
+ ]
tools/cleanup_planner_tool.py ADDED
@@ -0,0 +1,181 @@
1
+ """
2
+ Cleanup Planning MCP Tool
3
+
4
+ Analyzes detected trash and generates cleanup action plans.
5
+ """
6
+
7
+ from typing import Any, Optional
8
+ from trash_model import Detection
9
+ import llm_client
10
+
11
+
12
+ def plan_cleanup(
13
+ detections: list[Detection],
14
+ location: Optional[str] = None,
15
+ notes: Optional[str] = None,
16
+ use_llm: bool = False
17
+ ) -> dict[str, Any]:
18
+ """
19
+ Generate a cleanup plan based on detected trash.
20
+
21
+ Args:
22
+ detections: List of trash detections from detection tool
23
+ location: Optional location description
24
+ notes: Optional additional context
25
+ use_llm: Whether to use LLM for enhanced planning (vs rule-based)
26
+
27
+ Returns:
28
+ Dict containing:
29
+ - severity: "low" | "medium" | "high"
30
+ - recommended_volunteers: int
31
+ - estimated_time_minutes: int
32
+ - equipment_needed: list[str]
33
+ - urgency_days: int (recommended action timeframe)
34
+ - environmental_impact: str
35
+ - action_summary: str
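+ 
+ Example (illustrative; detection dicts only need "label" and "score" here):
+ >>> dets = [{"label": "plastic_bottle", "score": 0.91},
+ ... {"label": "food_wrapper", "score": 0.78}]
+ >>> plan_cleanup(dets)["severity"]
+ 'low'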
36
+ """
37
+ if not detections:
38
+ return {
39
+ "severity": "low",
40
+ "recommended_volunteers": 0,
41
+ "estimated_time_minutes": 0,
42
+ "equipment_needed": [],
43
+ "urgency_days": 0,
44
+ "environmental_impact": "No trash detected - area appears clean.",
45
+ "action_summary": "No cleanup action needed at this time."
46
+ }
47
+
48
+ # Rule-based analysis
49
+ count = len(detections)
50
+ avg_confidence = sum(d["score"] for d in detections) / count
51
+ categories = set(d["label"] for d in detections)
52
+
53
+ # Calculate severity
54
+ if count >= 15 or len(categories) >= 6:
55
+ severity = "high"
56
+ urgency_days = 1
57
+ volunteers = 4 + (count // 10)
58
+ time_estimate = 90 + (count * 3)
59
+ elif count >= 7 or len(categories) >= 4:
60
+ severity = "medium"
61
+ urgency_days = 3
62
+ volunteers = 2 + (count // 8)
63
+ time_estimate = 45 + (count * 2)
64
+ else:
65
+ severity = "low"
66
+ urgency_days = 7
67
+ volunteers = 1 + (count // 5)
68
+ time_estimate = 20 + (count * 2)
69
+
70
+ # Equipment recommendations
71
+ equipment = ["Heavy-duty trash bags", "Gloves", "Grabber tools"]
72
+
73
+ if "glass_bottle" in categories:
74
+ equipment.append("Safety goggles")
75
+ equipment.append("Puncture-resistant bags")
76
+
77
+ if count > 10:
78
+ equipment.append("Wheeled collection bin")
79
+
80
+ # Environmental impact assessment
81
+ impact_descriptions = {
82
+ "high": "Significant environmental concern. Risk of wildlife harm, water contamination, and community health issues. Immediate action recommended.",
83
+ "medium": "Moderate environmental impact. Potential for wildlife interaction and visual pollution. Timely cleanup will prevent escalation.",
84
+ "low": "Minor environmental impact. Early intervention will maintain area cleanliness and prevent accumulation."
85
+ }
86
+
87
+ plan = {
88
+ "severity": severity,
89
+ "recommended_volunteers": volunteers,
90
+ "estimated_time_minutes": time_estimate,
91
+ "equipment_needed": equipment,
92
+ "urgency_days": urgency_days,
93
+ "environmental_impact": impact_descriptions[severity],
94
+ "action_summary": _generate_action_summary(
95
+ severity, volunteers, time_estimate, count, categories
96
+ )
97
+ }
98
+
99
+ # Optionally enhance with LLM
100
+ if use_llm:
101
+ plan["action_summary"] = _enhance_with_llm(plan, detections, location, notes)
102
+
103
+ return plan
104
+
105
+
106
+ def _generate_action_summary(
107
+ severity: str,
108
+ volunteers: int,
109
+ time_minutes: int,
110
+ count: int,
111
+ categories: set[str]
112
+ ) -> str:
113
+ """Generate human-readable action summary."""
114
+ category_list = ", ".join(sorted(categories)[:3])
115
+ if len(categories) > 3:
116
+ category_list += f" and {len(categories) - 3} other types"
117
+
118
+ summary = f"""**Cleanup Plan - {severity.upper()} Priority**
119
+
120
+ Detected {count} trash items including {category_list}.
121
+
122
+ **Recommended Resources:**
123
+ - {volunteers} volunteer(s)
124
+ - Approximately {time_minutes} minutes
125
+ - Standard cleanup equipment
126
+
127
+ **Next Steps:**
128
+ 1. Gather volunteers and equipment
129
+ 2. Coordinate cleanup date/time
130
+ 3. Execute cleanup operation
131
+ 4. Dispose of collected waste properly
132
+ 5. Document completion for tracking
133
+ """
134
+ return summary
135
+
136
+
137
+ def _enhance_with_llm(
138
+ plan: dict[str, Any],
139
+ detections: list[Detection],
140
+ location: Optional[str],
141
+ notes: Optional[str]
142
+ ) -> str:
143
+ """Use LLM to create more detailed, context-aware action summary."""
144
+ detection_summary = f"{len(detections)} items detected: "
145
+ detection_summary += ", ".join(set(d["label"] for d in detections))
146
+
147
+ context_parts = [
148
+ f"Trash detection summary: {detection_summary}",
149
+ f"Severity assessment: {plan['severity']}",
150
+ f"Recommended volunteers: {plan['recommended_volunteers']}",
151
+ f"Estimated time: {plan['estimated_time_minutes']} minutes"
152
+ ]
153
+
154
+ if location:
155
+ context_parts.append(f"Location: {location}")
156
+ if notes:
157
+ context_parts.append(f"Additional context: {notes}")
158
+
159
+ prompt = f"""Based on this trash detection analysis, create a clear, actionable cleanup plan summary:
160
+
161
+ {chr(10).join(context_parts)}
162
+
163
+ Write a brief, practical summary that:
164
+ 1. States the situation clearly
165
+ 2. Recommends specific actions
166
+ 3. Estimates resources needed
167
+ 4. Explains why this matters for the environment
168
+
169
+ Keep it concise (3-4 sentences)."""
170
+
171
+ try:
172
+ enhanced = llm_client.generate_text(
173
+ prompt,
174
+ system_prompt="You are a helpful environmental cleanup coordinator. Be practical and encouraging.",
175
+ max_tokens=300,
176
+ temperature=0.7
177
+ )
178
+ return enhanced
179
+ except Exception as e:
180
+ print(f"LLM enhancement failed: {e}")
181
+ return plan["action_summary"]
tools/history_tool.py ADDED
@@ -0,0 +1,270 @@
1
+ """
2
+ History Tracking MCP Tool
3
+
4
+ Logs trash detection events and provides querying capabilities for analysis.
5
+ Uses SQLite for persistent storage.
6
+ """
7
+
8
+ import sqlite3
9
+ import json
10
+ from datetime import datetime, timedelta
11
+ from typing import Optional, Any
12
+ from pathlib import Path
13
+ from trash_model import Detection
14
+
15
+
16
+ DB_PATH = Path(__file__).parent.parent / "data" / "trash_events.db"
17
+
18
+
19
+ def _get_connection() -> sqlite3.Connection:
20
+ """Get SQLite connection and ensure schema exists."""
21
+ DB_PATH.parent.mkdir(parents=True, exist_ok=True)
22
+ conn = sqlite3.connect(str(DB_PATH))
23
+ conn.row_factory = sqlite3.Row
24
+
25
+ # Create schema if not exists
26
+ conn.execute("""
27
+ CREATE TABLE IF NOT EXISTS events (
28
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
29
+ timestamp TEXT NOT NULL,
30
+ location TEXT,
31
+ latitude REAL,
32
+ longitude REAL,
33
+ severity TEXT NOT NULL,
34
+ trash_count INTEGER NOT NULL,
35
+ categories TEXT NOT NULL,
36
+ detections_json TEXT NOT NULL,
37
+ notes TEXT,
38
+ image_path TEXT,
39
+ cleaned BOOLEAN DEFAULT 0,
40
+ created_at TEXT DEFAULT CURRENT_TIMESTAMP
41
+ )
42
+ """)
43
+
44
+ conn.execute("""
45
+ CREATE INDEX IF NOT EXISTS idx_timestamp ON events(timestamp)
46
+ """)
47
+
48
+ conn.execute("""
49
+ CREATE INDEX IF NOT EXISTS idx_location ON events(location)
50
+ """)
51
+
52
+ conn.execute("""
53
+ CREATE INDEX IF NOT EXISTS idx_severity ON events(severity)
54
+ """)
55
+
56
+ conn.commit()
57
+ return conn
58
+
59
+
60
+ def log_event(
61
+ detections: list[Detection],
62
+ severity: str,
63
+ location: Optional[str] = None,
64
+ notes: Optional[str] = None,
65
+ image_path: Optional[str] = None,
66
+ latitude: Optional[float] = None,
67
+ longitude: Optional[float] = None
68
+ ) -> dict[str, Any]:
69
+ """
70
+ Log a trash detection event to the database.
71
+
72
+ Args:
73
+ detections: List of trash detections
74
+ severity: "low" | "medium" | "high"
75
+ location: Human-readable location description
76
+ notes: Optional user notes
77
+ image_path: Optional path to saved image
78
+ latitude: Optional GPS latitude
79
+ longitude: Optional GPS longitude
80
+
81
+ Returns:
82
+ Dict with event_id and confirmation message
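+ 
+ Example (illustrative; writes to data/trash_events.db):
+ result = log_event([{"label": "plastic_bottle", "score": 0.9}],
+ severity="low", location="Main Street Park")
+ print(result["event_id"])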
83
+ """
84
+ conn = _get_connection()
85
+
86
+ timestamp = datetime.now().isoformat()
87
+ trash_count = len(detections)
88
+ categories = json.dumps(list(set(d["label"] for d in detections)))
89
+ detections_json = json.dumps(detections)
90
+
91
+ cursor = conn.execute("""
92
+ INSERT INTO events (
93
+ timestamp, location, latitude, longitude, severity,
94
+ trash_count, categories, detections_json, notes, image_path
95
+ ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
96
+ """, (
97
+ timestamp, location, latitude, longitude, severity,
98
+ trash_count, categories, detections_json, notes, image_path
99
+ ))
100
+
101
+ event_id = cursor.lastrowid
102
+ conn.commit()
103
+ conn.close()
104
+
105
+ return {
106
+ "event_id": event_id,
107
+ "timestamp": timestamp,
108
+ "message": f"Event logged successfully (ID: {event_id})"
109
+ }
110
+
111
+
112
+ def query_events(
113
+ days: Optional[int] = None,
114
+ location: Optional[str] = None,
115
+ severity: Optional[str] = None,
116
+ min_trash_count: Optional[int] = None,
117
+ cleaned_only: bool = False,
118
+ limit: int = 100
119
+ ) -> dict[str, Any]:
120
+ """
121
+ Query trash events with filtering options.
122
+
123
+ Args:
124
+ days: Only events from last N days
125
+ location: Filter by location (partial match)
126
+ severity: Filter by severity level
127
+ min_trash_count: Minimum trash items threshold
128
+ cleaned_only: Only show cleaned events
129
+ limit: Maximum results to return
130
+
131
+ Returns:
132
+ Dict containing:
133
+ - events: List of matching events
134
+ - total_count: Total matching events
135
+ - summary: Aggregate statistics
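+ 
+ Example (illustrative):
+ recent = query_events(days=7, severity="high", limit=10)
+ for ev in recent["events"]:
+ print(ev["location"], ev["trash_count"])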
136
+ """
137
+ conn = _get_connection()
138
+
139
+ # Build query
140
+ conditions = []
141
+ params = []
142
+
143
+ if days:
144
+ cutoff = (datetime.now() - timedelta(days=days)).isoformat()
145
+ conditions.append("timestamp >= ?")
146
+ params.append(cutoff)
147
+
148
+ if location:
149
+ conditions.append("location LIKE ?")
150
+ params.append(f"%{location}%")
151
+
152
+ if severity:
153
+ conditions.append("severity = ?")
154
+ params.append(severity)
155
+
156
+ if min_trash_count:
157
+ conditions.append("trash_count >= ?")
158
+ params.append(min_trash_count)
159
+
160
+ if cleaned_only:
161
+ conditions.append("cleaned = 1")
162
+
163
+ where_clause = " AND ".join(conditions) if conditions else "1=1"
164
+
165
+ # Get events
166
+ query = f"""
167
+ SELECT
168
+ id, timestamp, location, latitude, longitude,
169
+ severity, trash_count, categories, notes, image_path, cleaned
170
+ FROM events
171
+ WHERE {where_clause}
172
+ ORDER BY timestamp DESC
173
+ LIMIT ?
174
+ """
175
+ params.append(limit)
176
+
177
+ cursor = conn.execute(query, params)
178
+ events = [dict(row) for row in cursor.fetchall()]
179
+
180
+ # Parse JSON fields
181
+ for event in events:
182
+ event["categories"] = json.loads(event["categories"])
183
+
184
+ # Get summary statistics
185
+ summary_query = f"""
186
+ SELECT
187
+ COUNT(*) as total_events,
188
+ SUM(trash_count) as total_trash_items,
189
+ AVG(trash_count) as avg_trash_per_event,
190
+ COUNT(DISTINCT location) as unique_locations
191
+ FROM events
192
+ WHERE {where_clause}
193
+ """
194
+ summary_cursor = conn.execute(summary_query, params[:-1]) # Exclude limit param
195
+ summary = dict(summary_cursor.fetchone())
196
+
197
+ conn.close()
198
+
199
+ return {
200
+ "events": events,
201
+ "total_count": len(events),
202
+ "summary": summary,
203
+ "filters_applied": {
204
+ "days": days,
205
+ "location": location,
206
+ "severity": severity,
207
+ "min_trash_count": min_trash_count
208
+ }
209
+ }
210
+
211
+
212
+ def get_hotspots(min_events: int = 2, days: Optional[int] = 30):
213
+ """
214
+ Identify locations with recurring trash problems.
215
+
216
+ Args:
217
+ min_events: Minimum number of events to qualify as hotspot
218
+ days: Time window to analyze (None = all time)
219
+
220
+ Returns:
221
+ Dict with hotspot locations and their statistics
222
+ """
223
+ conn = _get_connection()
224
+
225
+ conditions = ["location IS NOT NULL"]
226
+ params = []
227
+
228
+ if days:
229
+ cutoff = (datetime.now() - timedelta(days=days)).isoformat()
230
+ conditions.append("timestamp >= ?")
231
+ params.append(cutoff)
232
+
233
+ where_clause = " AND ".join(conditions)
234
+
235
+ query = f"""
236
+ SELECT
237
+ location,
238
+ COUNT(*) as event_count,
239
+ SUM(trash_count) as total_trash,
240
+ AVG(trash_count) as avg_trash,
241
+ MAX(timestamp) as last_event,
242
+ GROUP_CONCAT(DISTINCT severity) as severities
243
+ FROM events
244
+ WHERE {where_clause}
245
+ GROUP BY location
246
+ HAVING event_count >= ?
247
+ ORDER BY event_count DESC, total_trash DESC
248
+ """
249
+ params.append(min_events)
250
+
251
+ cursor = conn.execute(query, params)
252
+ hotspots = [dict(row) for row in cursor.fetchall()]
253
+
254
+ conn.close()
255
+
256
+ return {
257
+ "hotspots": hotspots,
258
+ "count": len(hotspots),
259
+ "criteria": f"Locations with {min_events}+ events" + (f" in last {days} days" if days else "")
260
+ }
261
+
262
+
263
+ def mark_cleaned(event_id: int):
264
+ """Mark an event as cleaned."""
265
+ conn = _get_connection()
266
+ conn.execute("UPDATE events SET cleaned = 1 WHERE id = ?", (event_id,))
267
+ conn.commit()
268
+ conn.close()
269
+
270
+ return {"message": f"Event {event_id} marked as cleaned"}
tools/report_generator_tool.py ADDED
@@ -0,0 +1,274 @@
+"""
+Report Generator MCP Tool
+
+Generates formatted reports for trash detection events.
+Suitable for city authorities, community groups, or documentation.
+"""
+
+from typing import Optional, Any
+from datetime import datetime
+from trash_model import Detection
+import llm_client
+
+
+def generate_report(
+    detections: list[Detection],
+    severity: str,
+    location: Optional[str] = None,
+    notes: Optional[str] = None,
+    event_id: Optional[int] = None,
+    plan: Optional[Any] = None,
+    format: str = "email"
+):
+    """
+    Generate a formatted report for a trash detection event.
+
+    Args:
+        detections: List of trash detections
+        severity: Severity level
+        location: Location description
+        notes: Additional notes
+        event_id: Database event ID, if logged
+        plan: Optional cleanup plan data
+        format: "email" | "markdown" | "plain"
+
+    Returns:
+        Dict with report text and metadata
+    """
+    if format == "email":
+        report_text = _generate_email_report(detections, severity, location, notes, plan)
+    elif format == "markdown":
+        report_text = _generate_markdown_report(detections, severity, location, notes, event_id, plan)
+    else:
+        report_text = _generate_plain_report(detections, severity, location, notes, plan)
+
+    return {
+        "report": report_text,
+        "format": format,
+        "generated_at": datetime.now().isoformat(),
+        "event_id": event_id
+    }
+
+
+def _generate_email_report(
+    detections: list[Detection],
+    severity: str,
+    location: Optional[str],
+    notes: Optional[str],
+    plan: Optional[Any]
+) -> str:
+    """Generate an email-formatted report for city authorities."""
+    location_str = location or "[Location to be specified]"
+    date_str = datetime.now().strftime("%B %d, %Y")
+
+    # Summarize detections by category
+    category_counts = {}
+    for det in detections:
+        label = det["label"]
+        category_counts[label] = category_counts.get(label, 0) + 1
+
+    items_list = "\n".join([
+        f"  • {label.replace('_', ' ').title()}: {count} item(s)"
+        for label, count in sorted(category_counts.items())
+    ])
+
+    urgency_text = {
+        "high": "URGENT - Immediate attention required",
+        "medium": "Moderate priority - Action needed within 1-3 days",
+        "low": "Low priority - Routine cleanup recommended"
+    }
+
+    template = f"""Subject: Trash Cleanup Request - {location_str}
+
+Dear City Services / Environmental Department,
+
+I am writing to report significant litter accumulation that requires attention at the following location:
+
+**Location:** {location_str}
+**Date Reported:** {date_str}
+**Severity Level:** {severity.upper()} ({urgency_text.get(severity, '')})
+
+**Details of Trash Observed:**
+Total items detected: {len(detections)}
+
+{items_list}
+"""
+
+    if notes:
+        template += f"\n**Additional Context:**\n{notes}\n"
+
+    if plan:
+        template += f"""
+**Recommended Action:**
+- Estimated cleanup time: {plan.get('estimated_time_minutes', 'N/A')} minutes
+- Volunteers needed: {plan.get('recommended_volunteers', 'N/A')}
+- Equipment required: {', '.join(plan.get('equipment_needed', []))}
+- Urgency: Action within {plan.get('urgency_days', 'N/A')} day(s)
+"""
+
+    template += """
+This accumulation poses environmental and health concerns for the community. I would appreciate a timely response regarding cleanup scheduling.
+
+Thank you for your attention to this matter.
+
+Best regards,
+[Your Name / Community Group]
+[Contact Information]
+"""
+
+    return template
+
+
+def _generate_markdown_report(
+    detections: list[Detection],
+    severity: str,
+    location: Optional[str],
+    notes: Optional[str],
+    event_id: Optional[int],
+    plan: Optional[Any]
+) -> str:
+    """Generate a Markdown-formatted report for documentation."""
+    location_str = location or "Unspecified location"
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    # Group detections by category
+    category_counts = {}
+    for det in detections:
+        label = det["label"]
+        category_counts[label] = category_counts.get(label, 0) + 1
+
+    report = f"""# Trash Detection Report
+
+## Event Information
+- **Event ID:** {event_id if event_id else 'Not logged'}
+- **Timestamp:** {timestamp}
+- **Location:** {location_str}
+- **Severity:** {severity.upper()}
+
+## Detection Summary
+- **Total Items:** {len(detections)}
+- **Unique Categories:** {len(category_counts)}
+
+### Items Breakdown
+"""
+
+    for label, count in sorted(category_counts.items(), key=lambda x: x[1], reverse=True):
+        report += f"- **{label.replace('_', ' ').title()}:** {count} item(s)\n"
+
+    if plan:
+        report += f"""
+## Cleanup Plan
+- **Recommended Volunteers:** {plan.get('recommended_volunteers', 'N/A')}
+- **Estimated Time:** {plan.get('estimated_time_minutes', 'N/A')} minutes
+- **Urgency:** Within {plan.get('urgency_days', 'N/A')} day(s)
+- **Equipment Needed:**
+"""
+        for equipment in plan.get('equipment_needed', []):
+            report += f"  - {equipment}\n"
+
+        report += f"\n### Environmental Impact\n{plan.get('environmental_impact', 'N/A')}\n"
+
+    if notes:
+        report += f"\n## Additional Notes\n{notes}\n"
+
+    report += "\n---\n*Generated by CleanCity Agent*"
+
+    return report
+
+
+def _generate_plain_report(
+    detections: list[Detection],
+    severity: str,
+    location: Optional[str],
+    notes: Optional[str],
+    plan: Optional[Any]
+) -> str:
+    """Generate a plain-text report."""
+    lines = [
+        "=" * 60,
+        "TRASH DETECTION REPORT",
+        "=" * 60,
+        "",
+        f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
+        f"Location: {location or 'Unspecified'}",
+        f"Severity: {severity.upper()}",
+        "",
+        f"Total Items Detected: {len(detections)}",
+        ""
+    ]
+
+    # Group items by category
+    category_counts = {}
+    for det in detections:
+        label = det["label"]
+        category_counts[label] = category_counts.get(label, 0) + 1
+
+    lines.append("Items by Category:")
+    for label, count in sorted(category_counts.items()):
+        lines.append(f"  - {label.replace('_', ' ').title()}: {count}")
+
+    if plan:
+        lines.extend([
+            "",
+            "Cleanup Recommendations:",
+            f"  - Volunteers needed: {plan.get('recommended_volunteers', 'N/A')}",
+            f"  - Estimated time: {plan.get('estimated_time_minutes', 'N/A')} minutes",
+            f"  - Action within: {plan.get('urgency_days', 'N/A')} day(s)",
+            f"  - Equipment: {', '.join(plan.get('equipment_needed', []))}"
+        ])
+
+    if notes:
+        lines.extend(["", "Notes:", notes])
+
+    lines.extend(["", "=" * 60])
+
+    return "\n".join(lines)
+
+
+def generate_llm_enhanced_report(
+    detections: list[Detection],
+    severity: str,
+    location: Optional[str] = None,
+    notes: Optional[str] = None,
+    plan: Optional[Any] = None
+) -> str:
+    """Use an LLM to generate a more sophisticated, context-aware report."""
+    context = f"""Trash detection event:
+- Location: {location or 'unspecified'}
+- Items detected: {len(detections)}
+- Severity: {severity}
+- Categories: {', '.join(set(d['label'] for d in detections))}
+"""
+
+    if notes:
+        context += f"- Context: {notes}\n"
+
+    if plan:
+        context += f"- Recommended volunteers: {plan.get('recommended_volunteers')}\n"
+        context += f"- Estimated cleanup time: {plan.get('estimated_time_minutes')} minutes\n"
+
+    prompt = f"""Based on this trash detection data, write a professional report suitable for city authorities:
+
+{context}
+
+Create a clear, actionable report that:
+1. Describes the situation factually
+2. Emphasizes environmental/community impact
+3. Provides specific cleanup recommendations
+4. Has an appropriate professional tone
+
+Format as an email that could be sent to city services."""
+
+    try:
+        report = llm_client.generate_text(
+            prompt,
+            system_prompt="You are a professional environmental reporter writing to city officials.",
+            max_tokens=500,
+            temperature=0.5
+        )
+        return report
+    except Exception as e:
+        print(f"LLM report generation failed: {e}")
+        # Fall back to the template-based email report
+        return _generate_email_report(detections, severity, location, notes, plan)
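
A quick smoke test for the generator, sketched under the same assumptions (Detection-shaped dicts; the plan keys mirror the ones the templates read via plan.get). generate_llm_enhanced_report additionally needs a configured llm_client, so it is omitted here.

# Hypothetical usage sketch (not part of the uploaded files).
from tools.report_generator_tool import generate_report

detections = [
    {"bbox": [0.0, 0.0, 50.0, 50.0], "label": "plastic_bottle", "score": 0.88},
    {"bbox": [60.0, 10.0, 120.0, 90.0], "label": "cigarette_butt", "score": 0.65},
]

# Illustrative plan; keys match what the report templates look up.
plan = {
    "recommended_volunteers": 3,
    "estimated_time_minutes": 45,
    "urgency_days": 2,
    "equipment_needed": ["gloves", "trash bags"],
    "environmental_impact": "Plastic waste near a storm drain.",
}

result = generate_report(
    detections=detections,
    severity="medium",
    location="5th Ave & Main St",
    plan=plan,
    format="markdown",
)
print(result["report"])
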
tools/trash_detection_tool.py ADDED
@@ -0,0 +1,95 @@
+"""
+Trash Detection MCP Tool
+
+Wraps the trash detection model for use as an MCP tool.
+"""
+
+from PIL import Image
+import base64
+from io import BytesIO
+
+from trash_model import detect_trash, Detection
+
+
+def detect_trash_mcp(image_data: str | dict):
+    """
+    MCP tool wrapper for trash detection.
+
+    Args:
+        image_data: Either:
+            - Base64-encoded image string
+            - Dict with 'path' key pointing to an image file
+            - Dict with 'base64' key containing a base64 image
+
+    Returns:
+        Dict containing:
+        - detections: List of trash objects found
+        - count: Total number of items detected
+        - categories: Unique trash categories found
+        - summary: Human-readable summary
+    """
+    # Parse input
+    image = _load_image_from_input(image_data)
+
+    # Run detection
+    detections = detect_trash(image)
+
+    # Analyze results
+    categories = list(set(d["label"] for d in detections))
+    avg_confidence = sum(d["score"] for d in detections) / len(detections) if detections else 0
+
+    summary = f"Detected {len(detections)} trash items across {len(categories)} categories. "
+    summary += f"Average confidence: {avg_confidence:.1%}"
+
+    return {
+        "detections": detections,
+        "count": len(detections),
+        "categories": categories,
+        "average_confidence": avg_confidence,
+        "summary": summary,
+        "image_dimensions": {"width": image.width, "height": image.height}
+    }
+
+
+def _load_image_from_input(image_data: str | dict) -> Image.Image:
+    """Load a PIL Image from various input formats."""
+    if isinstance(image_data, str):
+        # Assume base64 encoded
+        if image_data.startswith('data:image'):
+            # Remove data URL prefix
+            image_data = image_data.split(',', 1)[1]
+        image_bytes = base64.b64decode(image_data)
+        return Image.open(BytesIO(image_bytes))
+
+    elif isinstance(image_data, dict):
+        if 'path' in image_data:
+            return Image.open(image_data['path'])
+        elif 'base64' in image_data:
+            image_bytes = base64.b64decode(image_data['base64'])
+            return Image.open(BytesIO(image_bytes))
+
+    raise ValueError("Invalid image_data format. Provide a base64 string or a dict with a 'path' or 'base64' key")
+
+
+def format_detections_for_display(detections: list[Detection]) -> str:
+    """Format detection results as readable text."""
+    if not detections:
+        return "No trash detected in the image."
+
+    lines = [f"Found {len(detections)} trash items:\n"]
+
+    # Group by category
+    by_category = {}
+    for det in detections:
+        category = det["label"]
+        if category not in by_category:
+            by_category[category] = []
+        by_category[category].append(det)
+
+    for category, items in sorted(by_category.items()):
+        avg_conf = sum(d["score"] for d in items) / len(items)
+        lines.append(f"  • {category}: {len(items)} item(s) (confidence: {avg_conf:.1%})")
+
+    return "\n".join(lines)
trash_model.py CHANGED
@@ -1,251 +1,127 @@
-"""
-CleanCity Agent - Trash Detection Module
-
-Modern YOLO-based trash detection using Ultralytics (latest version).
-Supports YOLOv8, YOLOv9, YOLOv10, and YOLOv11 models.
-
-Features:
-- Automatic model loading with caching
-- Configurable confidence threshold
-- Type-safe detection results
-- Graceful error handling
-- Environment variable configuration
-"""
-
-import os
-from typing import TypedDict, Optional
-from pathlib import Path
-from PIL import Image
-import logging
-
-# Configure logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# Import YOLO with error handling
-try:
-    from ultralytics import YOLO
-    YOLO_AVAILABLE = True
-except ImportError:
-    YOLO_AVAILABLE = False
-    logger.warning("⚠️ Ultralytics not installed. Run: pip install ultralytics")
-
-
-class Detection(TypedDict):
-    """Type-safe trash detection result."""
-    bbox: list[float]   # [x1, y1, x2, y2] in pixels
-    label: str          # Trash category/class name
-    score: float        # Confidence score (0.0 to 1.0)
-    class_id: int       # Class ID from model
-
-
-class TrashDetector:
-    """
-    Modern trash detection using YOLO.
-
-    Singleton pattern - model loaded once and cached.
-    Thread-safe for multiple detections.
-    """
-
-    _instance: Optional['TrashDetector'] = None
-    _model: Optional[YOLO] = None
-
-    def __new__(cls):
-        """Ensure singleton instance."""
-        if cls._instance is None:
-            cls._instance = super().__new__(cls)
-        return cls._instance
-
-    def __init__(self):
-        """Initialize detector (only runs once)."""
-        if self._model is None:
-            self._load_model()
-
-    def _load_model(self) -> None:
-        """Load YOLO model from weights file."""
-        if not YOLO_AVAILABLE:
-            raise ImportError(
-                "Ultralytics package not installed. "
-                "Install with: pip install ultralytics"
-            )
-
-        # Get model path from environment or use default
-        model_path = os.getenv("YOLO_MODEL_PATH", "Weights/best.pt")
-        model_file = Path(model_path)
-
-        if not model_file.exists():
-            raise FileNotFoundError(
-                f"YOLO model file not found: {model_path}\n"
-                f"Please ensure the model file exists or update YOLO_MODEL_PATH in .env"
-            )
-
-        logger.info(f"🔄 Loading YOLO model from {model_path}...")
-
-        try:
-            self._model = YOLO(str(model_file))
-            logger.info(f"✅ Model loaded successfully!")
-            logger.info(f"   Model type: {self._model.model_name if hasattr(self._model, 'model_name') else 'YOLO'}")
-            logger.info(f"   Classes ({len(self._model.names)}): {list(self._model.names.values())}")
-        except Exception as e:
-            logger.error(f"❌ Failed to load model: {e}")
-            raise
-
-    @property
-    def model(self) -> YOLO:
-        """Get the loaded YOLO model."""
-        if self._model is None:
-            raise RuntimeError("Model not loaded. Call _load_model() first.")
-        return self._model
-
-    @property
-    def class_names(self) -> dict[int, str]:
-        """Get mapping of class IDs to names."""
-        return self.model.names
-
-    def detect(
-        self,
-        image: Image.Image,
-        conf_threshold: Optional[float] = None,
-        verbose: bool = False
-    ) -> list[Detection]:
-        """
-        Detect trash objects in an image.
-
-        Args:
-            image: PIL Image to analyze
-            conf_threshold: Confidence threshold (0.0-1.0). Uses env var if None.
-            verbose: Show detailed YOLO output
-
-        Returns:
-            List of detections with bounding boxes, labels, and scores
-
-        Raises:
-            RuntimeError: If model not loaded or detection fails
-        """
-        # Get confidence threshold
-        if conf_threshold is None:
-            conf_threshold = float(os.getenv("YOLO_CONFIDENCE", "0.25"))
-
-        # Get verbose setting
-        if not verbose:
-            verbose = os.getenv("YOLO_VERBOSE", "false").lower() == "true"
-
-        try:
-            # Run inference
-            results = self.model(
-                image,
-                conf=conf_threshold,
-                verbose=verbose
-            )
-
-            # Parse results
-            detections: list[Detection] = []
-
-            if len(results) > 0:
-                result = results[0]
-
-                # Check if any detections found
-                if result.boxes is not None and len(result.boxes) > 0:
-                    # Extract detection data
-                    boxes = result.boxes.xyxy.cpu().numpy()  # [x1, y1, x2, y2]
-                    confidences = result.boxes.conf.cpu().numpy()
-                    class_ids = result.boxes.cls.cpu().numpy().astype(int)
-
-                    # Convert to Detection format
-                    for box, conf, cls_id in zip(boxes, confidences, class_ids):
-                        detection: Detection = {
-                            "bbox": box.tolist(),
-                            "label": self.model.names[cls_id],
-                            "score": float(conf),
-                            "class_id": int(cls_id)
-                        }
-                        detections.append(detection)
-
-                    logger.info(f"✅ Detected {len(detections)} trash objects")
-                else:
-                    logger.info("ℹ️ No trash detected in image")
-
-            return detections
-
-        except Exception as e:
-            logger.error(f"❌ Detection failed: {e}")
-            raise RuntimeError(f"Detection failed: {e}")
-
-
-# ============================================================================
-# Public API Functions
-# ============================================================================
-
-def detect_trash(
-    image: Image.Image,
-    conf_threshold: Optional[float] = None
-) -> list[Detection]:
-    """
-    Detect trash in an image (convenience function).
-
-    Args:
-        image: PIL Image to analyze
-        conf_threshold: Confidence threshold (0.0-1.0)
-
-    Returns:
-        List of trash detections
-    """
-    detector = TrashDetector()
-    return detector.detect(image, conf_threshold)
-
-
-def get_model_info() -> dict:
-    """
-    Get information about the loaded model.
-
-    Returns:
-        Dictionary with model metadata
-    """
-    detector = TrashDetector()
-    return {
-        "available": YOLO_AVAILABLE,
-        "loaded": detector._model is not None,
-        "classes": detector.class_names if detector._model else {},
-        "num_classes": len(detector.class_names) if detector._model else 0,
-    }
-
-
-# ============================================================================
-# Module Test
-# ============================================================================
-
-if __name__ == "__main__":
-    """Test the trash detection module."""
-    print("=" * 70)
-    print("Testing CleanCity Trash Detection Module")
-    print("=" * 70)
-
-    # Check if YOLO is available
-    print(f"\n1. YOLO Available: {YOLO_AVAILABLE}")
-
-    if not YOLO_AVAILABLE:
-        print("❌ Please install ultralytics: pip install ultralytics")
-        exit(1)
-
-    # Test model loading
-    print("\n2. Testing model loading...")
-    try:
-        detector = TrashDetector()
-        print("✅ Model loaded successfully")
-    except Exception as e:
-        print(f"❌ Model loading failed: {e}")
-        exit(1)
-
-    # Print model info
-    print("\n3. Model Information:")
-    info = get_model_info()
-    print(f"   Classes: {info['num_classes']}")
-    print(f"   Labels: {list(info['classes'].values())}")
-
-    # Test with a sample image (if exists)
-    print("\n4. Testing detection (requires sample image)...")
-    print("   To test detection, provide a test image")
-
-    print("\n" + "=" * 70)
-    print("✅ Module test complete!")
-    print("=" * 70)
 
+"""
+Trash Detection Model Wrapper
+
+This module provides an interface for trash detection in images using YOLOv8.
+Loads a trained model from Weights/best.pt for real trash detection.
+"""
+
+from typing import TypedDict
+from PIL import Image
+from pathlib import Path
+
+# Import YOLO from ultralytics
+try:
+    from ultralytics import YOLO
+    YOLO_AVAILABLE = True
+except ImportError:
+    YOLO_AVAILABLE = False
+    print("⚠️ Ultralytics not available. Install with: pip install ultralytics")
+
+
+class Detection(TypedDict):
+    """Single trash detection result."""
+    bbox: list[float]   # [x1, y1, x2, y2] in pixels
+    label: str          # Trash category
+    score: float        # Confidence score (0-1)
+
+
+# Global model instance (loaded once)
+_model = None
+
+
+# The YOLO return annotation is quoted so this module still imports
+# when ultralytics is not installed and the YOLO name is undefined.
+def load_model(model_path: str = "Weights/best.pt") -> "YOLO":
+    """
+    Load the YOLOv8 trash detection model.
+
+    Args:
+        model_path: Path to the model weights file
+
+    Returns:
+        Loaded YOLO model instance
+    """
+    global _model
+
+    if _model is None:
+        if not YOLO_AVAILABLE:
+            raise ImportError("Ultralytics not installed. Run: pip install ultralytics")
+
+        model_file = Path(model_path)
+        if not model_file.exists():
+            raise FileNotFoundError(f"Model file not found: {model_path}")
+
+        print(f"🔄 Loading YOLO model from {model_path}...")
+        _model = YOLO(str(model_file))
+        print("✅ Model loaded successfully!")
+        print(f"   Classes: {_model.names}")
+
+    return _model
+
+
+def detect_trash(image: Image.Image, conf_threshold: float = 0.25) -> list[Detection]:
+    """
+    Detect trash objects in an image using YOLOv8.
+
+    Args:
+        image: PIL Image to analyze
+        conf_threshold: Confidence threshold for detections (0-1)
+
+    Returns:
+        List of detections with bounding boxes, labels, and confidence scores
+    """
+    try:
+        # Load model (only happens once)
+        model = load_model()
+
+        # Run inference
+        results = model(image, conf=conf_threshold, verbose=False)
+
+        # Parse results
+        detections: list[Detection] = []
+
+        # Get the first result (single image)
+        if len(results) > 0:
+            result = results[0]
+
+            # Extract boxes, classes, and scores
+            if result.boxes is not None and len(result.boxes) > 0:
+                boxes = result.boxes.xyxy.cpu().numpy()  # [x1, y1, x2, y2]
+                confidences = result.boxes.conf.cpu().numpy()
+                class_ids = result.boxes.cls.cpu().numpy().astype(int)
+
+                # Convert to Detection format
+                for box, conf, cls_id in zip(boxes, confidences, class_ids):
+                    # Get class name
+                    label = model.names[cls_id]
+
+                    detection: Detection = {
+                        "bbox": box.tolist(),  # [x1, y1, x2, y2]
+                        "label": label,
+                        "score": float(conf)
+                    }
+                    detections.append(detection)
+
+        return detections
+
+    except Exception as e:
+        print(f"❌ Error during detection: {e}")
+        print("   Falling back to empty detection list")
+        return []
+
+
+def get_model_info():
+    """Get information about the loaded model."""
+    try:
+        model = load_model()
+        return {
+            "model_type": "YOLOv8",
+            "classes": model.names,
+            "num_classes": len(model.names),
+            "model_path": "Weights/best.pt"
+        }
+    except Exception as e:
+        return {
+            "error": str(e),
+            "model_type": "None",
+            "status": "Model not loaded"
+        }
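
Finally, a minimal sketch of the new module-level API, assuming ultralytics is installed and Weights/best.pt is present; the sample image path is illustrative.

# Hypothetical usage sketch (not part of the uploaded files).
from PIL import Image
from trash_model import detect_trash, get_model_info

print(get_model_info())  # class names and count, or an error dict if loading fails

image = Image.open("samples/street.jpg")  # illustrative path
for det in detect_trash(image, conf_threshold=0.3):
    x1, y1, x2, y2 = det["bbox"]
    print(f"{det['label']} ({det['score']:.0%}) at [{x1:.0f}, {y1:.0f}, {x2:.0f}, {y2:.0f}]")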