aliroohan179 commited on
Commit
6810092
·
verified ·
1 Parent(s): f2bc7d5
Files changed (13) hide show
  1. .dockerignore +47 -0
  2. .gitignore +57 -0
  3. Dockerfile +58 -0
  4. Dockerfile.local +42 -0
  5. README.md +326 -10
  6. README_HUGGINGFACE.md +181 -0
  7. docker-build.sh +180 -0
  8. docker-compose.yml +39 -0
  9. main.py +292 -0
  10. requirements.txt +12 -0
  11. run.sh +53 -0
  12. setup.sh +25 -0
  13. test_api.py +196 -0
.dockerignore ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ venv/
8
+ env/
9
+ ENV/
10
+ .venv
11
+
12
+ # IDEs
13
+ .vscode/
14
+ .idea/
15
+ *.swp
16
+ *.swo
17
+ *~
18
+
19
+ # Git
20
+ .git/
21
+ .gitignore
22
+
23
+ # Documentation
24
+ README.md
25
+ *.md
26
+ !README_HUGGINGFACE.md
27
+
28
+ # Scripts
29
+ setup.sh
30
+ run.sh
31
+
32
+ # Testing
33
+ .pytest_cache/
34
+ .coverage
35
+ htmlcov/
36
+
37
+ # OS
38
+ .DS_Store
39
+ Thumbs.db
40
+
41
+ # Logs
42
+ *.log
43
+
44
+ # Temporary files
45
+ tmp/
46
+ temp/
47
+
.gitignore ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ pip-wheel-metadata/
20
+ share/python-wheels/
21
+ *.egg-info/
22
+ .installed.cfg
23
+ *.egg
24
+ MANIFEST
25
+
26
+ # Virtual Environment
27
+ venv/
28
+ env/
29
+ ENV/
30
+ env.bak/
31
+ venv.bak/
32
+
33
+ # PyCharm
34
+ .idea/
35
+
36
+ # VS Code
37
+ .vscode/
38
+
39
+ # Environment variables
40
+ .env
41
+
42
+ # SAM Model Checkpoints (large files)
43
+ *.pth
44
+
45
+ # Test images
46
+ test_images/
47
+ temp/
48
+ uploads/
49
+
50
+ # Logs
51
+ *.log
52
+ logs/
53
+
54
+ # OS
55
+ .DS_Store
56
+ Thumbs.db
57
+
Dockerfile ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use Python 3.10 slim image for smaller size
2
+ FROM python:3.10-slim
3
+
4
+ # Set working directory
5
+ WORKDIR /app
6
+
7
+ # Install system dependencies required for OpenCV and other packages
8
+ RUN apt-get update && apt-get install -y \
9
+ libgl1-mesa-glx \
10
+ libglib2.0-0 \
11
+ libsm6 \
12
+ libxext6 \
13
+ libxrender-dev \
14
+ libgomp1 \
15
+ wget \
16
+ && rm -rf /var/lib/apt/lists/*
17
+
18
+ # Copy requirements first for better caching
19
+ COPY requirements.txt .
20
+
21
+ # Install Python dependencies
22
+ # Install PyTorch CPU version to reduce image size (GPU not available on HF free tier)
23
+ RUN pip install --no-cache-dir torch torchvision --index-url https://download.pytorch.org/whl/cpu && \
24
+ pip install --no-cache-dir -r requirements.txt
25
+
26
+ # Copy application code
27
+ COPY main.py .
28
+
29
+ # Download SAM model (using smaller vit_b model for HF)
30
+ # You can change this to vit_h or vit_l if needed, but they're larger
31
+ RUN wget -q https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth || \
32
+ echo "Warning: Could not download SAM model. App will run with fallback methods."
33
+
34
+ # Update main.py to use the vit_b model if downloaded
35
+ RUN if [ -f "sam_vit_b_01ec64.pth" ]; then \
36
+ sed -i 's/sam_vit_h_4b8939.pth/sam_vit_b_01ec64.pth/g' main.py && \
37
+ sed -i 's/vit_h/vit_b/g' main.py; \
38
+ fi
39
+
40
+ # Create a non-root user for Hugging Face
41
+ RUN useradd -m -u 1000 user
42
+ USER user
43
+
44
+ # Set environment variables
45
+ ENV HOME=/home/user \
46
+ PATH=/home/user/.local/bin:$PATH \
47
+ PYTHONUNBUFFERED=1
48
+
49
+ # Hugging Face Spaces uses port 7860 by default
50
+ EXPOSE 7860
51
+
52
+ # Health check
53
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
54
+ CMD python -c "import requests; requests.get('http://localhost:7860/health')" || exit 1
55
+
56
+ # Run the application on port 7860 (Hugging Face default)
57
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
58
+
Dockerfile.local ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dockerfile for local development and testing
2
+ # This version uses the full GPU-enabled PyTorch and larger SAM model
3
+
4
+ FROM python:3.10-slim
5
+
6
+ WORKDIR /app
7
+
8
+ # Install system dependencies
9
+ RUN apt-get update && apt-get install -y \
10
+ libgl1-mesa-glx \
11
+ libglib2.0-0 \
12
+ libsm6 \
13
+ libxext6 \
14
+ libxrender-dev \
15
+ libgomp1 \
16
+ wget \
17
+ && rm -rf /var/lib/apt/lists/*
18
+
19
+ # Copy requirements
20
+ COPY requirements.txt .
21
+
22
+ # Install Python dependencies with GPU support
23
+ RUN pip install --no-cache-dir -r requirements.txt
24
+
25
+ # Copy application code
26
+ COPY main.py .
27
+
28
+ # Download SAM model (using vit_h for best quality in local development)
29
+ # Comment out if you want to mount the model from host
30
+ RUN wget -q https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth || \
31
+ echo "Warning: Could not download SAM model. App will run with fallback methods."
32
+
33
+ # Expose port 8000 for local development
34
+ EXPOSE 8000
35
+
36
+ # Health check
37
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
38
+ CMD python -c "import requests; requests.get('http://localhost:8000/health')" || exit 1
39
+
40
+ # Run the application
41
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--reload"]
42
+
README.md CHANGED
@@ -1,10 +1,326 @@
1
- ---
2
- title: Wallpaint
3
- emoji: 🌖
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Wall Color Visualizer - Backend
2
+
3
+ A FastAPI backend service that uses Meta's Segment Anything Model (SAM) for intelligent wall segmentation and color visualization.
4
+
5
+ ## Features
6
+
7
+ - **AI-Powered Segmentation**: Uses Meta's SAM model for accurate object segmentation
8
+ - **Fallback Support**: Traditional CV methods when SAM is unavailable
9
+ - **Color Application**: Real-time color overlay with adjustable opacity
10
+ - **REST API**: Clean and well-documented API endpoints
11
+ - **CORS Enabled**: Ready for cross-origin requests from Flutter app
12
+
13
+ ## Prerequisites
14
+
15
+ - Python 3.8 or higher
16
+ - CUDA-capable GPU (optional, for faster processing)
17
+ - At least 8GB RAM
18
+ - 5GB free disk space (for SAM model)
19
+
20
+ ## Installation
21
+
22
+ ### 1. Clone or Navigate to Backend Directory
23
+
24
+ ```bash
25
+ cd path/to/project/backend
26
+ ```
27
+
28
+ ### 2. Create Virtual Environment
29
+
30
+ ```bash
31
+ python3 -m venv venv
32
+ source venv/bin/activate # On Windows: venv\Scripts\activate
33
+ ```
34
+
35
+ ### 3. Install Dependencies
36
+
37
+ ```bash
38
+ pip install --upgrade pip
39
+ pip install -r requirements.txt
40
+ ```
41
+
42
+ ### 4. Download SAM Model
43
+
44
+ Download the SAM model checkpoint (choose one based on your needs):
45
+
46
+ **Option 1: Largest and Most Accurate (vit_h - 2.4GB)**
47
+ ```bash
48
+ wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth
49
+ ```
50
+
51
+ **Option 2: Medium (vit_l - 1.2GB)**
52
+ ```bash
53
+ wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth
54
+ ```
55
+
56
+ **Option 3: Smallest and Fastest (vit_b - 375MB)**
57
+ ```bash
58
+ wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth
59
+ ```
60
+
61
+ If using a different model, update `sam_checkpoint` and `model_type` in `main.py`.
62
+
63
+ ### 5. Quick Setup (All-in-One)
64
+
65
+ Alternatively, run the setup script:
66
+
67
+ ```bash
68
+ chmod +x setup.sh
69
+ ./setup.sh
70
+ ```
71
+
72
+ ## Running the Server
73
+
74
+ ### Development Mode
75
+
76
+ ```bash
77
+ uvicorn main:app --reload --host 0.0.0.0 --port 8000
78
+ ```
79
+
80
+ ### Production Mode
81
+
82
+ ```bash
83
+ uvicorn main:app --host 0.0.0.0 --port 8000 --workers 4
84
+ ```
85
+
86
+ The server will start at `http://localhost:8000`
87
+
88
+ ## API Endpoints
89
+
90
+ ### Health Check
91
+
92
+ **GET** `/health`
93
+
94
+ Check if the server and SAM model are loaded.
95
+
96
+ **Response:**
97
+ ```json
98
+ {
99
+ "status": "healthy",
100
+ "device": "cuda",
101
+ "sam_model_loaded": true
102
+ }
103
+ ```
104
+
105
+ ### Automatic Segmentation
106
+
107
+ **POST** `/segment-automatic`
108
+
109
+ Automatically segments all objects in the image.
110
+
111
+ **Request:**
112
+ - Content-Type: `multipart/form-data`
113
+ - Body: `file` (image file)
114
+
115
+ **Response:**
116
+ ```json
117
+ {
118
+ "success": true,
119
+ "num_masks": 5,
120
+ "masks": [
121
+ {
122
+ "id": 0,
123
+ "mask_base64": "...",
124
+ "area": 50000,
125
+ "bbox": [x, y, width, height]
126
+ }
127
+ ],
128
+ "image_base64": "..."
129
+ }
130
+ ```
131
+
132
+ ### Simple Segmentation (Fallback)
133
+
134
+ **POST** `/simple-segment`
135
+
136
+ Uses traditional CV methods for segmentation.
137
+
138
+ Same request/response format as `/segment-automatic`.
139
+
140
+ ### Point-Based Segmentation
141
+
142
+ **POST** `/segment-point`
143
+
144
+ Segments object at a specific point in the image.
145
+
146
+ **Request:**
147
+ ```json
148
+ {
149
+ "image_base64": "...",
150
+ "point_x": 100.5,
151
+ "point_y": 200.5
152
+ }
153
+ ```
154
+
155
+ **Response:**
156
+ ```json
157
+ {
158
+ "success": true,
159
+ "mask_base64": "...",
160
+ "score": 0.95
161
+ }
162
+ ```
163
+
164
+ ### Apply Color
165
+
166
+ **POST** `/apply-color`
167
+
168
+ Applies color to a masked region.
169
+
170
+ **Request:**
171
+ ```json
172
+ {
173
+ "image_base64": "...",
174
+ "mask_base64": "...",
175
+ "color_hex": "#FF5733",
176
+ "opacity": 0.8
177
+ }
178
+ ```
179
+
180
+ **Response:**
181
+ ```json
182
+ {
183
+ "success": true,
184
+ "result_base64": "..."
185
+ }
186
+ ```
187
+
188
+ ## Configuration
189
+
190
+ ### Environment Variables
191
+
192
+ Create a `.env` file with the following variables:
193
+
194
+ ```env
195
+ HOST=0.0.0.0
196
+ PORT=8000
197
+ SAM_CHECKPOINT=sam_vit_h_4b8939.pth
198
+ MODEL_TYPE=vit_h
199
+ DEVICE=cuda # or 'cpu'
200
+ ```
201
+
202
+ ### Model Selection
203
+
204
+ In `main.py`, modify:
205
+
206
+ ```python
207
+ sam_checkpoint = "sam_vit_h_4b8939.pth" # Path to model
208
+ model_type = "vit_h" # vit_h, vit_l, or vit_b
209
+ device = "cuda" # cuda or cpu
210
+ ```
211
+
212
+ ## Troubleshooting
213
+
214
+ ### CUDA Out of Memory
215
+
216
+ If you encounter CUDA memory errors:
217
+
218
+ 1. Use a smaller model (vit_b)
219
+ 2. Reduce image size in Flutter app
220
+ 3. Use CPU instead: `device = "cpu"`
221
+
222
+ ### Model Not Loading
223
+
224
+ 1. Verify checkpoint file exists in the correct location
225
+ 2. Check file integrity (download again if needed)
226
+ 3. Ensure sufficient RAM/VRAM
227
+
228
+ ### Slow Performance
229
+
230
+ - Use GPU (CUDA) instead of CPU
231
+ - Reduce image resolution
232
+ - Use smaller model (vit_b)
233
+
234
+ ## Testing
235
+
236
+ ### Test with cURL
237
+
238
+ ```bash
239
+ # Health check
240
+ curl http://localhost:8000/health
241
+
242
+ # Upload and segment
243
+ curl -X POST -F "file=@test_image.jpg" \
244
+ http://localhost:8000/segment-automatic
245
+ ```
246
+
247
+ ### Test with Python
248
+
249
+ ```python
250
+ import requests
251
+
252
+ # Health check
253
+ response = requests.get('http://localhost:8000/health')
254
+ print(response.json())
255
+
256
+ # Segment image
257
+ with open('test_image.jpg', 'rb') as f:
258
+ files = {'file': f}
259
+ response = requests.post(
260
+ 'http://localhost:8000/segment-automatic',
261
+ files=files
262
+ )
263
+ print(response.json())
264
+ ```
265
+
266
+ ## Performance
267
+
268
+ ### With CUDA (GPU)
269
+ - Segmentation: 2-5 seconds per image
270
+ - Color application: < 1 second
271
+
272
+ ### Without CUDA (CPU)
273
+ - Segmentation: 10-30 seconds per image
274
+ - Color application: < 1 second
275
+
276
+ ## Network Configuration
277
+
278
+ ### For Android Emulator
279
+ Use: `http://10.0.2.2:8000`
280
+
281
+ ### For iOS Simulator
282
+ Use: `http://localhost:8000`
283
+
284
+ ### For Real Devices
285
+ 1. Find your computer's IP address:
286
+ ```bash
287
+ # Linux/Mac
288
+ ip addr show
289
+ # or
290
+ ifconfig
291
+
292
+ # Windows
293
+ ipconfig
294
+ ```
295
+
296
+ 2. Use: `http://YOUR_IP:8000`
297
+ 3. Ensure firewall allows port 8000
298
+
299
+ ## Dependencies
300
+
301
+ - **fastapi**: Web framework
302
+ - **uvicorn**: ASGI server
303
+ - **segment-anything**: Meta's SAM model
304
+ - **torch**: PyTorch for deep learning
305
+ - **opencv-python**: Image processing
306
+ - **pillow**: Image manipulation
307
+ - **numpy**: Numerical operations
308
+
309
+ ## License
310
+
311
+ This project uses Meta's Segment Anything Model. See SAM's license for details.
312
+
313
+ ## Support
314
+
315
+ For issues or questions:
316
+ 1. Check the troubleshooting section
317
+ 2. Verify all dependencies are installed
318
+ 3. Ensure the model is downloaded correctly
319
+ 4. Check server logs for detailed error messages
320
+
321
+ ## Credits
322
+
323
+ - Meta AI for the Segment Anything Model
324
+ - FastAPI framework
325
+ - OpenCV community
326
+
README_HUGGINGFACE.md ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Wall Color Visualizer API - Hugging Face Deployment
2
+
3
+ This guide explains how to deploy the Wall Color Visualizer API to Hugging Face Spaces.
4
+
5
+ ## 🚀 Quick Deployment
6
+
7
+ ### Option 1: Deploy via Hugging Face Web Interface
8
+
9
+ 1. **Create a new Space:**
10
+ - Go to https://huggingface.co/new-space
11
+ - Choose a name for your Space
12
+ - Select **Docker** as the Space SDK
13
+ - Choose **Public** or **Private**
14
+ - Click "Create Space"
15
+
16
+ 2. **Upload files:**
17
+ - Upload `Dockerfile`
18
+ - Upload `main.py`
19
+ - Upload `requirements.txt`
20
+ - The Space will automatically build and deploy
21
+
22
+ 3. **Access your API:**
23
+ - Your API will be available at: `https://YOUR-USERNAME-SPACENAME.hf.space`
24
+ - API docs: `https://YOUR-USERNAME-SPACENAME.hf.space/docs`
25
+
26
+ ### Option 2: Deploy via Git
27
+
28
+ ```bash
29
+ # Clone your Hugging Face Space repository
30
+ git clone https://huggingface.co/spaces/YOUR-USERNAME/YOUR-SPACE-NAME
31
+ cd YOUR-SPACE-NAME
32
+
33
+ # Copy the necessary files
34
+ cp path/to/backend/Dockerfile .
35
+ cp path/to/backend/main.py .
36
+ cp path/to/backend/requirements.txt .
37
+
38
+ # Commit and push
39
+ git add .
40
+ git commit -m "Initial deployment"
41
+ git push
42
+ ```
43
+
44
+ ## 📋 Important Notes
45
+
46
+ ### Port Configuration
47
+ - Hugging Face Spaces uses **port 7860** by default
48
+ - The Dockerfile is already configured for this
49
+ - Update your Flutter app's API URL to use the Hugging Face Space URL
50
+
51
+ ### Model Selection
52
+ The Dockerfile uses the **SAM ViT-B** model (smallest, ~375MB) to fit within Hugging Face's constraints:
53
+ - `sam_vit_b_01ec64.pth` - Base model (fastest, good quality)
54
+
55
+ If you need better quality and have more resources, you can modify the Dockerfile to use:
56
+ - `sam_vit_l_0b3195.pth` - Large model (~1.2GB)
57
+ - `sam_vit_h_4b8939.pth` - Huge model (~2.4GB)
58
+
59
+ ### Hardware Requirements
60
+ - **CPU Only**: The Dockerfile is configured for CPU inference (free tier)
61
+ - **GPU**: Upgrade to GPU Space for better performance
62
+ - Go to Space Settings → Change Hardware → Select GPU
63
+
64
+ ### Resource Limits (Free Tier)
65
+ - **CPU**: 2 vCPUs
66
+ - **RAM**: 16 GB
67
+ - **Storage**: 50 GB
68
+ - **Timeout**: 60 seconds per request
69
+
70
+ ## 🔧 Configuration
71
+
72
+ ### Environment Variables
73
+ You can add environment variables in the Space Settings:
74
+
75
+ ```bash
76
+ # Optional: Set model type
77
+ MODEL_TYPE=vit_b
78
+ SAM_CHECKPOINT=sam_vit_b_01ec64.pth
79
+
80
+ # Optional: Enable/disable features
81
+ ENABLE_GPU=false
82
+ ```
83
+
84
+ ### Modify Dockerfile for GPU
85
+ If you have a GPU Space, update the Dockerfile:
86
+
87
+ ```dockerfile
88
+ # Change this line in the Dockerfile:
89
+ RUN pip install --no-cache-dir torch torchvision --index-url https://download.pytorch.org/whl/cpu
90
+
91
+ # To this for GPU support:
92
+ RUN pip install --no-cache-dir torch torchvision --index-url https://download.pytorch.org/whl/cu118
93
+ ```
94
+
95
+ ## 🧪 Testing Your Deployment
96
+
97
+ Once deployed, test your API:
98
+
99
+ ```bash
100
+ # Health check
101
+ curl https://YOUR-USERNAME-SPACENAME.hf.space/health
102
+
103
+ # Root endpoint
104
+ curl https://YOUR-USERNAME-SPACENAME.hf.space/
105
+ ```
106
+
107
+ Or visit the API docs directly:
108
+ ```
109
+ https://YOUR-USERNAME-SPACENAME.hf.space/docs
110
+ ```
111
+
112
+ ## 🔄 Updating Your API in Flutter App
113
+
114
+ Update the API URL in your Flutter app's `lib/services/api_service.dart`:
115
+
116
+ ```dart
117
+ class ApiService {
118
+ // Change from localhost to your Hugging Face Space URL
119
+ static const String baseUrl = 'https://YOUR-USERNAME-SPACENAME.hf.space';
120
+
121
+ // Rest of your code...
122
+ }
123
+ ```
124
+
125
+ ## 📊 Monitoring
126
+
127
+ - **Logs**: View logs in the Space page under "Logs" tab
128
+ - **Metrics**: Check Space settings for usage metrics
129
+ - **Status**: Green dot indicates the Space is running
130
+
131
+ ## 🐛 Troubleshooting
132
+
133
+ ### Build Fails
134
+ - Check logs in the "Logs" tab
135
+ - Ensure all dependencies are in `requirements.txt`
136
+ - Verify Python version compatibility
137
+
138
+ ### Out of Memory
139
+ - Use the smaller SAM model (vit_b)
140
+ - Reduce the input image resolution before processing
141
+ - Consider upgrading to a larger Space
142
+
143
+ ### Slow Response Times
144
+ - First request is slower (model loading)
145
+ - Consider persistent storage for models
146
+ - Upgrade to GPU Space for faster inference
147
+
148
+ ### Connection Issues
149
+ - Ensure port 7860 is exposed
150
+ - Check CORS settings in `main.py`
151
+ - Verify Space is in "Running" state
152
+
153
+ ## 💡 Optimization Tips
154
+
155
+ 1. **Use persistent storage** for SAM models to avoid downloading on every restart
156
+ 2. **Enable caching** for frequently processed images
157
+ 3. **Implement request queuing** for high traffic
158
+ 4. **Use smaller images** (resize before processing)
159
+ 5. **Upgrade to GPU Space** for production use
160
+
161
+ ## 📝 Additional Resources
162
+
163
+ - [Hugging Face Spaces Documentation](https://huggingface.co/docs/hub/spaces)
164
+ - [Docker Spaces Guide](https://huggingface.co/docs/hub/spaces-sdks-docker)
165
+ - [FastAPI Documentation](https://fastapi.tiangolo.com/)
166
+ - [Segment Anything Documentation](https://github.com/facebookresearch/segment-anything)
167
+
168
+ ## 🆘 Support
169
+
170
+ If you encounter issues:
171
+ 1. Check the [Hugging Face Discord](https://discord.gg/hugging-face)
172
+ 2. Review [Spaces documentation](https://huggingface.co/docs/hub/spaces)
173
+ 3. Open an issue in your repository
174
+
175
+ ## 📜 License
176
+
177
+ Make sure to comply with:
178
+ - Segment Anything Model license
179
+ - Your project license
180
+ - Hugging Face Terms of Service
181
+
docker-build.sh ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ # Docker Build and Test Script for Wall Color Visualizer API
4
+
5
+ set -e # Exit on error
6
+
7
+ echo "🐳 Wall Color Visualizer - Docker Build Script"
8
+ echo "=============================================="
9
+ echo ""
10
+
11
+ # Colors for output
12
+ RED='\033[0;31m'
13
+ GREEN='\033[0;32m'
14
+ YELLOW='\033[1;33m'
15
+ NC='\033[0m' # No Color
16
+
17
+ # Function to print colored output
18
+ print_success() {
19
+ echo -e "${GREEN}✓ $1${NC}"
20
+ }
21
+
22
+ print_error() {
23
+ echo -e "${RED}✗ $1${NC}"
24
+ }
25
+
26
+ print_warning() {
27
+ echo -e "${YELLOW}⚠ $1${NC}"
28
+ }
29
+
30
+ print_info() {
31
+ echo -e "ℹ $1"
32
+ }
33
+
34
+ # Parse command line arguments
35
+ BUILD_TYPE="${1:-huggingface}" # Default to huggingface
36
+ IMAGE_NAME="wall-color-api"
37
+ CONTAINER_NAME="wall-color-api-test"
38
+
39
+ case $BUILD_TYPE in
40
+ "huggingface"|"hf")
41
+ DOCKERFILE="Dockerfile"
42
+ PORT=7860
43
+ print_info "Building for Hugging Face (port $PORT)"
44
+ ;;
45
+ "local")
46
+ DOCKERFILE="Dockerfile.local"
47
+ PORT=8000
48
+ print_info "Building for local development (port $PORT)"
49
+ ;;
50
+ "compose")
51
+ print_info "Using Docker Compose"
52
+ echo ""
53
+ docker-compose up --build
54
+ exit 0
55
+ ;;
56
+ *)
57
+ print_error "Unknown build type: $BUILD_TYPE"
58
+ echo "Usage: $0 [huggingface|local|compose]"
59
+ exit 1
60
+ ;;
61
+ esac
62
+
63
+ echo ""
64
+
65
+ # Check if Docker is installed
66
+ if ! command -v docker &> /dev/null; then
67
+ print_error "Docker is not installed!"
68
+ echo "Please install Docker first: https://docs.docker.com/get-docker/"
69
+ exit 1
70
+ fi
71
+ print_success "Docker is installed"
72
+
73
+ # Check if Dockerfile exists
74
+ if [ ! -f "$DOCKERFILE" ]; then
75
+ print_error "Dockerfile '$DOCKERFILE' not found!"
76
+ exit 1
77
+ fi
78
+ print_success "Dockerfile found: $DOCKERFILE"
79
+
80
+ echo ""
81
+ print_info "Step 1: Building Docker image..."
82
+ echo ""
83
+
84
+ # Build the Docker image
85
+ if docker build -f "$DOCKERFILE" -t "$IMAGE_NAME:$BUILD_TYPE" .; then
86
+ print_success "Docker image built successfully!"
87
+ else
88
+ print_error "Docker build failed!"
89
+ exit 1
90
+ fi
91
+
92
+ echo ""
93
+ print_info "Step 2: Stopping any existing containers..."
94
+
95
+ # Stop and remove existing container if running
96
+ if docker ps -a | grep -q "$CONTAINER_NAME"; then
97
+ docker stop "$CONTAINER_NAME" 2>/dev/null || true
98
+ docker rm "$CONTAINER_NAME" 2>/dev/null || true
99
+ print_success "Cleaned up existing container"
100
+ fi
101
+
102
+ echo ""
103
+ print_info "Step 3: Starting container..."
104
+ echo ""
105
+
106
+ # Run the container
107
+ if docker run -d \
108
+ --name "$CONTAINER_NAME" \
109
+ -p "$PORT:$PORT" \
110
+ "$IMAGE_NAME:$BUILD_TYPE"; then
111
+ print_success "Container started successfully!"
112
+ else
113
+ print_error "Failed to start container!"
114
+ exit 1
115
+ fi
116
+
117
+ echo ""
118
+ print_info "Step 4: Waiting for API to be ready..."
119
+
120
+ # Wait for the API to be ready
121
+ MAX_ATTEMPTS=30
122
+ ATTEMPT=0
123
+ while [ $ATTEMPT -lt $MAX_ATTEMPTS ]; do
124
+ if curl -s "http://localhost:$PORT/health" > /dev/null 2>&1; then
125
+ print_success "API is ready!"
126
+ break
127
+ fi
128
+ ATTEMPT=$((ATTEMPT + 1))
129
+ if [ $ATTEMPT -eq $MAX_ATTEMPTS ]; then
130
+ print_error "API failed to start within 30 seconds"
131
+ echo ""
132
+ echo "Container logs:"
133
+ docker logs "$CONTAINER_NAME"
134
+ exit 1
135
+ fi
136
+ echo -n "."
137
+ sleep 1
138
+ done
139
+
140
+ echo ""
141
+ echo ""
142
+ print_success "Deployment successful!"
143
+ echo ""
144
+ echo "=============================================="
145
+ echo "📊 Container Information:"
146
+ echo "=============================================="
147
+ echo "Container Name: $CONTAINER_NAME"
148
+ echo "Image: $IMAGE_NAME:$BUILD_TYPE"
149
+ echo ""
150
+ echo "🌐 Access URLs:"
151
+ echo " - API Root: http://localhost:$PORT/"
152
+ echo " - Health Check: http://localhost:$PORT/health"
153
+ echo " - API Docs: http://localhost:$PORT/docs"
154
+ echo ""
155
+ echo "🔧 Useful Commands:"
156
+ echo " - View logs: docker logs -f $CONTAINER_NAME"
157
+ echo " - Stop: docker stop $CONTAINER_NAME"
158
+ echo " - Remove: docker rm $CONTAINER_NAME"
159
+ echo " - Shell access: docker exec -it $CONTAINER_NAME /bin/bash"
160
+ echo ""
161
+ echo "=============================================="
162
+
163
+ # Test the API
164
+ echo ""
165
+ print_info "Running quick API test..."
166
+ echo ""
167
+
168
+ HEALTH_RESPONSE=$(curl -s "http://localhost:$PORT/health")
169
+ echo "Health check response:"
170
+ echo "$HEALTH_RESPONSE" | python3 -m json.tool 2>/dev/null || echo "$HEALTH_RESPONSE"
171
+
172
+ echo ""
173
+ print_success "All tests passed!"
174
+ echo ""
175
+ print_warning "Press Ctrl+C to stop viewing logs, container will keep running"
176
+ echo ""
177
+
178
+ # Follow logs
179
+ docker logs -f "$CONTAINER_NAME"
180
+
docker-compose.yml ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: '3.8'
2
+
3
+ services:
4
+ api:
5
+ build:
6
+ context: .
7
+ dockerfile: Dockerfile.local
8
+ container_name: wall-color-api
9
+ ports:
10
+ - "8000:8000"
11
+ volumes:
12
+ # Mount code for hot reload during development
13
+ - ./main.py:/app/main.py
14
+ # Mount SAM model if you have it locally (to avoid downloading)
15
+ # - ./sam_vit_h_4b8939.pth:/app/sam_vit_h_4b8939.pth
16
+ environment:
17
+ - PYTHONUNBUFFERED=1
18
+ restart: unless-stopped
19
+ # Uncomment the following lines if you have NVIDIA GPU
20
+ # deploy:
21
+ # resources:
22
+ # reservations:
23
+ # devices:
24
+ # - driver: nvidia
25
+ # count: 1
26
+ # capabilities: [gpu]
27
+
28
+ # Optional: Add nginx reverse proxy for production
29
+ # nginx:
30
+ # image: nginx:alpine
31
+ # container_name: wall-color-nginx
32
+ # ports:
33
+ # - "80:80"
34
+ # volumes:
35
+ # - ./nginx.conf:/etc/nginx/nginx.conf:ro
36
+ # depends_on:
37
+ # - api
38
+ # restart: unless-stopped
39
+
main.py ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, File, UploadFile, HTTPException
2
+ from fastapi.middleware.cors import CORSMiddleware
3
+ from fastapi.responses import StreamingResponse
4
+ from pydantic import BaseModel
5
+ import numpy as np
6
+ import cv2
7
+ from PIL import Image
8
+ import io
9
+ import base64
10
+ from typing import List, Optional
11
+ import torch
12
+ from segment_anything import sam_model_registry, SamAutomaticMaskGenerator, SamPredictor
13
+ import uvicorn
14
+
15
+ app = FastAPI(title="Wall Color Visualizer API")
16
+
17
+ # Configure CORS
18
+ app.add_middleware(
19
+ CORSMiddleware,
20
+ allow_origins=["*"],
21
+ allow_credentials=True,
22
+ allow_methods=["*"],
23
+ allow_headers=["*"],
24
+ )
25
+
26
+ # Global variables for SAM model
27
+ sam_checkpoint = "sam_vit_h_4b8939.pth"
28
+ model_type = "vit_h"
29
+ device = "cuda" if torch.cuda.is_available() else "cpu"
30
+ sam = None
31
+ mask_generator = None
32
+ predictor = None
33
+
34
+ # Request models
35
+ class SegmentRequest(BaseModel):
36
+ image_base64: str
37
+ point_x: Optional[float] = None
38
+ point_y: Optional[float] = None
39
+
40
+ class ColorChangeRequest(BaseModel):
41
+ image_base64: str
42
+ mask_base64: str
43
+ color_hex: str
44
+ opacity: float = 0.8
45
+
46
+ # Initialize SAM model
47
+ def initialize_sam():
48
+ global sam, mask_generator, predictor
49
+ try:
50
+ print(f"Loading SAM model on {device}...")
51
+ sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
52
+ sam.to(device=device)
53
+ mask_generator = SamAutomaticMaskGenerator(sam)
54
+ predictor = SamPredictor(sam)
55
+ print("SAM model loaded successfully!")
56
+ except Exception as e:
57
+ print(f"Warning: Could not load SAM model: {e}")
58
+ print("The API will run but segmentation features will be limited.")
59
+
60
+ @app.on_event("startup")
61
+ async def startup_event():
62
+ initialize_sam()
63
+
64
+ @app.get("/")
65
+ async def root():
66
+ return {
67
+ "message": "Wall Color Visualizer API",
68
+ "status": "running",
69
+ "sam_loaded": sam is not None
70
+ }
71
+
72
+ @app.get("/health")
73
+ async def health_check():
74
+ return {
75
+ "status": "healthy",
76
+ "device": device,
77
+ "sam_model_loaded": sam is not None
78
+ }
79
+
80
+ def decode_base64_image(base64_string: str) -> np.ndarray:
81
+ """Decode base64 string to numpy array image"""
82
+ try:
83
+ # Remove data URL prefix if present
84
+ if "base64," in base64_string:
85
+ base64_string = base64_string.split("base64,")[1]
86
+
87
+ img_data = base64.b64decode(base64_string)
88
+ img = Image.open(io.BytesIO(img_data))
89
+ img_array = np.array(img.convert("RGB"))
90
+ return img_array
91
+ except Exception as e:
92
+ raise HTTPException(status_code=400, detail=f"Invalid image data: {str(e)}")
93
+
94
+ def encode_image_to_base64(image: np.ndarray) -> str:
95
+ """Encode numpy array image to base64 string"""
96
+ img = Image.fromarray(image.astype(np.uint8))
97
+ buffered = io.BytesIO()
98
+ img.save(buffered, format="PNG")
99
+ img_str = base64.b64encode(buffered.getvalue()).decode()
100
+ return img_str
101
+
102
+ def encode_mask_to_base64(mask: np.ndarray) -> str:
103
+ """Encode binary mask to base64 string"""
104
+ mask_uint8 = (mask * 255).astype(np.uint8)
105
+ img = Image.fromarray(mask_uint8)
106
+ buffered = io.BytesIO()
107
+ img.save(buffered, format="PNG")
108
+ mask_str = base64.b64encode(buffered.getvalue()).decode()
109
+ return mask_str
110
+
111
+ def hex_to_rgb(hex_color: str) -> tuple:
112
+ """Convert hex color to RGB tuple"""
113
+ hex_color = hex_color.lstrip('#')
114
+ return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))
115
+
116
@app.post("/segment-automatic")
async def segment_automatic(file: UploadFile = File(...)):
    """Automatically segment all objects in the image.

    Runs SAM's automatic mask generator over the uploaded image and returns
    up to 10 masks, sorted by area (largest first), each base64-PNG encoded.

    Returns:
        dict with ``success``, ``num_masks``, ``masks`` (each with ``id``,
        ``mask_base64``, ``area``, ``bbox``) and the RGB-converted input
        image as ``image_base64``.

    Raises:
        HTTPException: 503 if the SAM model is not loaded; 500 if reading,
            decoding, or mask generation fails.
    """
    if sam is None:
        raise HTTPException(status_code=503, detail="SAM model not loaded")

    try:
        # Read the upload fully into memory and force RGB (drops alpha,
        # converts grayscale) since SAM expects a 3-channel image.
        contents = await file.read()
        image = Image.open(io.BytesIO(contents))
        image_np = np.array(image.convert("RGB"))

        # Generate masks with SAM's automatic mask generator
        masks = mask_generator.generate(image_np)

        # Sort masks by area (largest first) so the most prominent
        # regions come back first.
        masks = sorted(masks, key=lambda x: x['area'], reverse=True)

        # Keep only the top masks to bound response size
        result_masks = []
        for i, mask_data in enumerate(masks[:10]):  # Return top 10 masks
            mask = mask_data['segmentation']
            result_masks.append({
                "id": i,
                "mask_base64": encode_mask_to_base64(mask),
                "area": int(mask_data['area']),
                # bbox is [x, y, w, h]; cast to plain ints for JSON
                "bbox": [int(x) for x in mask_data['bbox']]
            })

        return {
            "success": True,
            "num_masks": len(result_masks),
            "masks": result_masks,
            # Echo the (RGB-normalized) input so clients can reuse it
            "image_base64": encode_image_to_base64(image_np)
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Segmentation failed: {str(e)}")
154
+
155
@app.post("/segment-point")
async def segment_point(request: SegmentRequest):
    """Segment the object at a specific (x, y) point in the image.

    Uses SAM's point-prompt prediction with a single foreground point and
    returns the highest-scoring of the multi-mask candidates.

    Returns:
        dict with ``success``, the best ``mask_base64`` (PNG) and its ``score``.

    Raises:
        HTTPException: 503 if SAM is not loaded; 400 if the point is missing
            or the image data is invalid; 500 on prediction failure.
    """
    if sam is None:
        raise HTTPException(status_code=503, detail="SAM model not loaded")

    # Validate the prompt before any heavy work. Previously this 400 was
    # raised inside the try block, where the broad `except Exception`
    # caught it and re-wrapped it as a 500.
    if request.point_x is None or request.point_y is None:
        raise HTTPException(status_code=400, detail="Point coordinates required")

    try:
        # Decode image (raises HTTPException(400) on bad base64 data)
        image_np = decode_base64_image(request.image_base64)

        # Set image for predictor (computes the image embedding)
        predictor.set_image(image_np)

        point_coords = np.array([[request.point_x, request.point_y]])
        point_labels = np.array([1])  # 1 = foreground point

        masks, scores, logits = predictor.predict(
            point_coords=point_coords,
            point_labels=point_labels,
            multimask_output=True
        )

        # Pick the candidate mask with the highest predicted IoU score
        best_mask_idx = np.argmax(scores)
        best_mask = masks[best_mask_idx]

        return {
            "success": True,
            "mask_base64": encode_mask_to_base64(best_mask),
            "score": float(scores[best_mask_idx])
        }

    except HTTPException:
        # Preserve the status code from decode_base64_image (400) instead
        # of converting every error into a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Segmentation failed: {str(e)}")
193
+
194
@app.post("/apply-color")
async def apply_color(request: ColorChangeRequest):
    """Blend a solid color over the masked region of an image.

    The mask is thresholded to binary, the requested hex color is
    alpha-blended over the masked pixels at ``request.opacity``, and the
    rest of the image is left untouched.

    Returns:
        dict with ``success`` and the blended image as ``result_base64`` (PNG).

    Raises:
        HTTPException: 400 if image or mask data is invalid; 500 on any
            other processing failure.
    """
    try:
        # Both decode calls raise HTTPException(400) on invalid base64 input
        image_np = decode_base64_image(request.image_base64)
        mask_np = decode_base64_image(request.mask_base64)

        # Collapse an RGB-encoded mask to grayscale, then threshold at
        # mid-gray to get a clean binary mask.
        if len(mask_np.shape) == 3:
            mask_np = cv2.cvtColor(mask_np, cv2.COLOR_RGB2GRAY)
        mask_binary = (mask_np > 128).astype(np.uint8)

        # Convert hex color to an (R, G, B) tuple
        rgb_color = hex_to_rgb(request.color_hex)

        # Solid-color layer restricted to the masked pixels
        colored_mask = np.zeros_like(image_np)
        colored_mask[mask_binary == 1] = rgb_color

        # Alpha-blend in float space to avoid uint8 overflow, then cast back
        result = image_np.copy().astype(float)
        alpha = request.opacity
        result[mask_binary == 1] = (
            alpha * colored_mask[mask_binary == 1] +
            (1 - alpha) * image_np[mask_binary == 1]
        )
        result = result.astype(np.uint8)

        return {
            "success": True,
            "result_base64": encode_image_to_base64(result)
        }

    except HTTPException:
        # decode_base64_image raises a 400 for invalid input; previously the
        # broad handler below re-wrapped it as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Color application failed: {str(e)}")
230
+
231
@app.post("/simple-segment")
async def simple_segment(file: UploadFile = File(...)):
    """Simple segmentation using traditional CV methods (fallback when SAM not available).

    Pipeline: grayscale -> Canny edge detection -> dilation -> external
    contours. Up to 5 of the largest contour regions (each covering at
    least 1% of the image) are returned as base64-PNG masks with their
    area and bounding box.

    Returns:
        dict with ``success``, ``num_masks``, ``masks``, the input as
        ``image_base64``, and ``method`` set to ``"traditional_cv"``.

    Raises:
        HTTPException: 500 if reading, decoding, or processing fails.
    """
    try:
        # Read the upload and force a 3-channel RGB array
        contents = await file.read()
        image = Image.open(io.BytesIO(contents))
        image_np = np.array(image.convert("RGB"))

        # Edge detection on the grayscale image.
        # (A previously-computed HSV conversion was unused and removed.)
        gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray, 50, 150)

        # Dilate edges so nearby edge fragments merge into connected regions
        kernel = np.ones((5, 5), np.uint8)
        dilated = cv2.dilate(edges, kernel, iterations=2)

        # Outer contours only; inner holes are ignored
        contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        result_masks = []
        h, w = image_np.shape[:2]

        # Largest regions first
        contours = sorted(contours, key=cv2.contourArea, reverse=True)

        for i, contour in enumerate(contours[:5]):  # Top 5 regions
            area = cv2.contourArea(contour)
            if area < (h * w * 0.01):  # Skip regions under 1% of the image
                continue

            # Filled contour as a 0/255 mask
            mask = np.zeros((h, w), dtype=np.uint8)
            cv2.drawContours(mask, [contour], -1, 255, -1)

            # Axis-aligned bounding box [x, y, w, h]
            x, y, bw, bh = cv2.boundingRect(contour)

            result_masks.append({
                "id": i,
                # Normalize 0/255 to 0/1 before encoding, matching SAM masks
                "mask_base64": encode_mask_to_base64(mask / 255),
                "area": int(area),
                "bbox": [int(x), int(y), int(bw), int(bh)]
            })

        return {
            "success": True,
            "num_masks": len(result_masks),
            "masks": result_masks,
            "image_base64": encode_image_to_base64(image_np),
            "method": "traditional_cv"
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Segmentation failed: {str(e)}")
289
+
290
if __name__ == "__main__":
    # Development entry point: serve the API on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)
292
+
requirements.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn
3
+ python-multipart
4
+ pillow
5
+ numpy
6
+ opencv-python
7
+ torch
8
+ torchvision
9
+ segment-anything
10
+ pydantic
11
+ python-jose[cryptography]
12
+
run.sh ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# Wall Color Visualizer Backend Runner Script
# Activates the project venv, warns if no SAM checkpoint is present,
# prints connection URLs (including for Flutter clients), and starts uvicorn.

echo "🎨 Starting Wall Color Visualizer Backend..."
echo "==========================================="
echo ""

# Check if virtual environment exists
if [ ! -d "venv" ]; then
    echo "❌ Virtual environment not found!"
    echo "Please run setup.sh first:"
    echo "  ./setup.sh"
    exit 1
fi

# Activate virtual environment
echo "📦 Activating virtual environment..."
source venv/bin/activate

# Check if any SAM checkpoint exists (huge/large/base variants)
if [ ! -f "sam_vit_h_4b8939.pth" ] && [ ! -f "sam_vit_l_0b3195.pth" ] && [ ! -f "sam_vit_b_01ec64.pth" ]; then
    echo "⚠️  Warning: SAM model not found!"
    echo "The API will work with fallback methods but won't have AI segmentation."
    echo ""
    echo "To download SAM model, run:"
    echo "  wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth"
    echo ""
fi

# Get local IP address.
# `hostname -I` is Linux-only (macOS/BSD lack -I); fall back to
# `ipconfig getifaddr`, and finally to "localhost" so the printed
# URLs are never blank.
LOCAL_IP=$(hostname -I 2>/dev/null | awk '{print $1}')
if [ -z "$LOCAL_IP" ]; then
    LOCAL_IP=$(ipconfig getifaddr en0 2>/dev/null || echo "localhost")
fi

echo "🚀 Starting FastAPI server..."
echo ""
echo "Server will be available at:"
echo "  - Local: http://localhost:8000"
echo "  - Network: http://$LOCAL_IP:8000"
echo "  - Health Check: http://localhost:8000/health"
echo "  - API Docs: http://localhost:8000/docs"
echo ""
echo "For Flutter app configuration:"
echo "  - Android Emulator: http://10.0.2.2:8000"
echo "  - iOS Simulator: http://localhost:8000"
echo "  - Real Device: http://$LOCAL_IP:8000"
echo ""
echo "Press Ctrl+C to stop the server"
echo "==========================================="
echo ""

# Start server (--reload for development hot-reloading)
uvicorn main:app --reload --host 0.0.0.0 --port 8000
53
+
setup.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# Wall Color Visualizer Backend setup: create a venv, install Python
# dependencies, and fetch the SAM checkpoint if it isn't already present.

set -e  # abort on the first failure so a broken install isn't reported as complete

echo "Setting up Wall Color Visualizer Backend..."

# Create virtual environment
python3 -m venv venv
source venv/bin/activate

# Upgrade pip
pip install --upgrade pip

# Install dependencies
pip install -r requirements.txt

# Download SAM model checkpoint (vit_h - largest and most accurate).
# The file is ~2.4 GB, so skip the download when it already exists.
if [ ! -f "sam_vit_h_4b8939.pth" ]; then
    echo "Downloading SAM model checkpoint..."
    wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth
else
    echo "SAM checkpoint already present, skipping download."
fi

# Alternative: Download smaller models if needed
# wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth
# wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth

echo "Setup complete!"
echo "To start the server, run: uvicorn main:app --reload --host 0.0.0.0 --port 8000"
25
+
test_api.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test script for Wall Color Visualizer API
4
+ """
5
+
6
+ import requests
7
+ import base64
8
+ import json
9
+ from pathlib import Path
10
+
11
+ # Configuration
12
+ BASE_URL = "http://localhost:8000"
13
+
14
def test_health():
    """Check that the /health endpoint responds with HTTP 200."""
    print("Testing health endpoint...")
    try:
        resp = requests.get(f"{BASE_URL}/health")
        print(f"Status: {resp.status_code}")
        print(f"Response: {json.dumps(resp.json(), indent=2)}")
        return resp.status_code == 200
    except Exception as exc:
        print(f"Error: {exc}")
        return False
25
+
26
def test_simple_segment(image_path):
    """Exercise the /simple-segment endpoint with an uploaded image file."""
    print(f"\nTesting simple segmentation with {image_path}...")

    if not Path(image_path).exists():
        print(f"Error: Image file not found: {image_path}")
        return False

    try:
        with open(image_path, 'rb') as fh:
            resp = requests.post(
                f"{BASE_URL}/simple-segment",
                files={'file': fh},
                timeout=60,
            )

        print(f"Status: {resp.status_code}")
        if resp.status_code != 200:
            print(f"Error: {resp.text}")
            return False

        payload = resp.json()
        print(f"Success: {payload['success']}")
        print(f"Number of masks: {payload['num_masks']}")
        print(f"Method: {payload.get('method', 'N/A')}")
        return True
    except Exception as exc:
        print(f"Error: {exc}")
        return False
56
+
57
def test_segment_automatic(image_path):
    """Test automatic segmentation endpoint (requires SAM).

    Uploads the image to /segment-automatic and reports the number of
    masks returned. Expected to fail with a 503 when no SAM checkpoint
    is loaded on the server.
    """
    print(f"\nTesting automatic segmentation with {image_path}...")

    if not Path(image_path).exists():
        print(f"Error: Image file not found: {image_path}")
        return False

    try:
        # Upload as multipart/form-data; generous timeout since SAM's
        # automatic mask generation can be slow on CPU.
        with open(image_path, 'rb') as f:
            files = {'file': f}
            response = requests.post(
                f"{BASE_URL}/segment-automatic",
                files=files,
                timeout=60
            )

        print(f"Status: {response.status_code}")
        if response.status_code == 200:
            data = response.json()
            print(f"Success: {data['success']}")
            print(f"Number of masks: {data['num_masks']}")
            return True
        else:
            print(f"Error: {response.text}")
            return False
    except Exception as e:
        print(f"Error: {e}")
        return False
86
+
87
def test_apply_color(image_path):
    """Test color application (requires existing segmentation).

    End-to-end flow: obtain masks from /simple-segment, then post the
    first mask together with the original image to /apply-color and, on
    success, save the recolored image to ``result_colored.png``.
    """
    print(f"\nTesting color application...")

    # First, get a segmentation
    if not Path(image_path).exists():
        print(f"Error: Image file not found: {image_path}")
        return False

    try:
        # Get segmentation
        with open(image_path, 'rb') as f:
            files = {'file': f}
            seg_response = requests.post(
                f"{BASE_URL}/simple-segment",
                files=files,
                timeout=60
            )

        if seg_response.status_code != 200:
            print("Failed to get segmentation")
            return False

        seg_data = seg_response.json()
        if not seg_data['masks']:
            print("No masks found")
            return False

        # Apply color to first mask (largest region, as the server sorts by area)
        image_base64 = seg_data['image_base64']
        mask_base64 = seg_data['masks'][0]['mask_base64']

        color_request = {
            'image_base64': image_base64,
            'mask_base64': mask_base64,
            'color_hex': '#FF5733',  # Orange-red color
            'opacity': 0.8
        }

        response = requests.post(
            f"{BASE_URL}/apply-color",
            json=color_request,
            timeout=60
        )

        print(f"Status: {response.status_code}")
        if response.status_code == 200:
            data = response.json()
            print(f"Success: {data['success']}")
            print("Color applied successfully!")

            # Optionally save result to disk for visual inspection
            if data.get('result_base64'):
                result_bytes = base64.b64decode(data['result_base64'])
                output_path = 'result_colored.png'
                with open(output_path, 'wb') as f:
                    f.write(result_bytes)
                print(f"Result saved to: {output_path}")

            return True
        else:
            print(f"Error: {response.text}")
            return False
    except Exception as e:
        print(f"Error: {e}")
        return False
153
+
154
def main():
    """Run all tests interactively.

    Always runs the health check; the image-based tests run only when the
    user supplies a path to an existing image file. Prints a pass/fail
    summary at the end.
    """
    print("=" * 60)
    print("Wall Color Visualizer API Test Suite")
    print("=" * 60)

    # Maps test name -> bool (passed)
    results = {}

    # Test 1: Health check
    results['health'] = test_health()

    # Ask for test image
    print("\n" + "=" * 60)
    image_path = input("Enter path to test image (or press Enter to skip): ").strip()

    if image_path and Path(image_path).exists():
        # Test 2: Simple segmentation
        results['simple_segment'] = test_simple_segment(image_path)

        # Test 3: Automatic segmentation (SAM)
        results['auto_segment'] = test_segment_automatic(image_path)

        # Test 4: Color application
        results['apply_color'] = test_apply_color(image_path)
    else:
        print("Skipping image-based tests...")

    # Summary
    print("\n" + "=" * 60)
    print("Test Results Summary")
    print("=" * 60)
    for test_name, passed in results.items():
        status = "✓ PASSED" if passed else "✗ FAILED"
        print(f"{test_name:20} : {status}")

    total = len(results)
    passed = sum(results.values())  # True counts as 1
    print(f"\nTotal: {passed}/{total} tests passed")
    print("=" * 60)
194
if __name__ == "__main__":
    # Run the interactive test suite when executed as a script.
    main()
196
+