Hasthika committed on
Commit
336b59e
·
verified ·
1 Parent(s): a556470

Experience Eats Backend V1

Browse files
Files changed (3) hide show
  1. Dockerfile +37 -0
  2. main.py +210 -0
  3. requirements.txt +8 -0
Dockerfile ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use official Python image with a slim footprint
FROM python:3.10-slim

# Don't write .pyc files, and flush stdout/stderr immediately so that
# application logs show up in the Hugging Face Spaces log viewer in real time.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# Set the working directory inside the container
WORKDIR /app

# Install system dependencies (required for some Python ML packages and OpenCV if needed later).
# --no-install-recommends keeps the layer small; the apt list cleanup happens in
# the same RUN so the cache never lands in a layer.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

# Create a non-root user required by Hugging Face Spaces
RUN useradd -m -u 1000 user

# Copy requirements first to leverage Docker cache
COPY requirements.txt .

# Install Python dependencies
# We use --no-cache-dir to keep the image size small
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code
COPY . .

# Create the storage directories explicitly and ensure appropriate permissions.
# Merged into a single RUN to avoid an extra image layer.
RUN mkdir -p /app/storage/uploads /app/storage/processed \
    && chown -R user:user /app

# Switch to the non-root user
USER user

# Hugging Face Spaces exposes port 7860 by default
EXPOSE 7860

# Start the FastAPI application
# Hugging Face expects the server to run on 0.0.0.0:7860
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Experience Eats 2.5D processing API.

FastAPI service that accepts 12 photos of a dish, removes backgrounds via the
remove.bg API, and generates depth maps with a Depth Anything pipeline.
"""
from fastapi import FastAPI, File, UploadFile, HTTPException, BackgroundTasks
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
import os
import shutil
import uuid
import requests
from dotenv import load_dotenv

# Pull REMOVE_BG_API_KEY (and any other settings) from a local .env file.
load_dotenv()

app = FastAPI(title="Experience Eats 2.5D Processing API")

# Configure CORS for local development
# NOTE(review): only the localhost:3000 frontend origin is allowed; widen or
# parameterize this before deploying a production frontend elsewhere.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Setup directories (relative to this file so the app works regardless of CWD)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
UPLOAD_DIR = os.path.join(BASE_DIR, "storage", "uploads")
PROCESSED_DIR = os.path.join(BASE_DIR, "storage", "processed")

os.makedirs(UPLOAD_DIR, exist_ok=True)
os.makedirs(PROCESSED_DIR, exist_ok=True)

# Mount static files to serve images (uploads and processed results are both
# reachable under /storage/...).
app.mount("/storage", StaticFiles(directory=os.path.join(BASE_DIR, "storage")), name="storage")

# Initialize Depth Estimator at import time. Loading can take a while (model
# download on first run); failure is tolerated — generate_depth_map falls back
# to simulation when depth_estimator stays None.
depth_estimator = None
try:
    from transformers import pipeline
    print("Loading Depth Anything model... (this may take a minute on first run)")
    # Using the V1 model which has native Hugging Face transformers pipeline support
    depth_estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-small-hf")
    print("Depth model loaded successfully!")
except Exception as e:
    print(f"Warning: Failed to load depth estimator. {e}")
45
def generate_depth_map(input_path: str, output_path: str) -> bool:
    """Generate a depth map for the image at *input_path*, saved to *output_path*.

    Uses the module-level ``depth_estimator`` pipeline (Depth Anything small,
    V1 — the model loaded at import time). If the model is unavailable, the
    source image is copied instead so downstream URLs still resolve.

    Args:
        input_path: path of the source image (any mode; RGBA is flattened).
        output_path: path to write the depth image (or the fallback copy) to.

    Returns:
        True when a real depth map was produced, False on fallback or failure.
    """
    if not depth_estimator:
        print("Depth estimator not loaded, simulating depth map.")
        shutil.copy(input_path, output_path)
        return False

    try:
        from PIL import Image
        # Context manager so the underlying file handle is released promptly
        # (Image.open is lazy and keeps the file open otherwise).
        with Image.open(input_path) as image:
            # Handle transparency by flattening onto white; the depth model
            # expects a plain RGB input.
            if image.mode == 'RGBA':
                background = Image.new('RGB', image.size, (255, 255, 255))
                background.paste(image, mask=image.split()[3])  # 3 is the alpha channel
                image = background
            elif image.mode != 'RGB':
                image = image.convert('RGB')

            result = depth_estimator(image)
        depth_img = result["depth"]
        depth_img.save(output_path)
        return True
    except Exception as e:
        print(f"Depth generation failed: {e}")
        return False
70
+
71
def remove_background(input_path: str, output_path: str) -> bool:
    """Remove the background of an image using the remove.bg API.

    When no valid ``REMOVE_BG_API_KEY`` is configured, the original image is
    copied to *output_path* as a simulation so the pipeline can proceed.

    Args:
        input_path: path of the source image sent to the API.
        output_path: path to write the background-removed PNG (or fallback copy).

    Returns:
        True on success (real or simulated), False when the API call fails.
    """
    api_key = os.getenv("REMOVE_BG_API_KEY")
    if not api_key or api_key == "your_api_key_here":
        # Fallback/simulation if no key is provided yet
        print(f"Warning: No valid REMOVE_BG_API_KEY found. Simulating background removal for {input_path}")
        shutil.copy(input_path, output_path)
        return True

    try:
        with open(input_path, 'rb') as f:
            response = requests.post(
                'https://api.remove.bg/v1.0/removebg',
                files={'image_file': f},
                data={'size': 'auto'},
                headers={'X-Api-Key': api_key},
                # Without a timeout, a stalled connection would hang the
                # background worker (and the job) indefinitely.
                timeout=60,
            )

        if response.status_code == requests.codes.ok:
            with open(output_path, 'wb') as out:
                out.write(response.content)
            return True
        print(f"Error from Remove.bg: {response.status_code} - {response.text}")
        return False
    except Exception as e:
        print(f"Request failed: {e}")
        return False
99
+
100
# NOTE(review): BackgroundTasks is already imported at the top of this file;
# this mid-file re-import is redundant but harmless.
from fastapi import BackgroundTasks
from typing import Dict, Any

# Simple in-memory storage for job status, keyed by job_id (UUID string).
# In production, this would be a database (Redis/Postgres) — this dict is
# lost on restart and is not shared across multiple worker processes.
jobs_db: Dict[str, Any] = {}
106
+
107
def process_photos_background(job_id: str, files_data: list, job_upload_dir: str, job_processed_dir: str):
    """Background task to process images so we don't block the API and trigger proxy timeouts.

    Runs background removal and depth-map generation for every uploaded angle,
    then stores the final result (or the error) in ``jobs_db[job_id]``.

    Args:
        job_id: UUID string identifying the job; also the per-job folder name.
        files_data: list of ``(safe_filename, input_file_path)`` tuples saved
            by the upload endpoint.
        job_upload_dir: directory the raw uploads were saved to (kept for
            interface compatibility; the paths in files_data already point here).
        job_processed_dir: directory to write processed outputs into.
    """
    try:
        jobs_db[job_id]["status"] = "processing"
        processed_files = []

        for i, (_safe_filename, input_file_path) in enumerate(files_data):
            output_file_path = os.path.join(job_processed_dir, f"angle_{i:02d}_nobg.png")
            depth_file_path = os.path.join(job_processed_dir, f"angle_{i:02d}_depth.png")

            # 1. Try to remove the background.
            bg_success = remove_background(input_file_path, output_file_path)

            # Decide once whether the processed cut-out is usable, and reuse
            # that decision for both the depth source and the URL prefix
            # (previously this condition was evaluated twice).
            use_processed = bg_success and os.path.exists(output_file_path)
            source_for_depth = output_file_path if use_processed else input_file_path
            source_folder = "processed" if use_processed else "uploads"

            # 2. Generate the depth map from the best available image.
            generate_depth_map(source_for_depth, depth_file_path)

            processed_files.append({
                "angle": i,
                "image_url": f"/storage/{source_folder}/{job_id}/{os.path.basename(source_for_depth)}",
                "depth_url": f"/storage/processed/{job_id}/{os.path.basename(depth_file_path)}"
            })

        # Update job as complete
        jobs_db[job_id] = {
            "status": "success",
            "layers": processed_files
        }
    except Exception as e:
        import traceback
        traceback.print_exc()
        # Surface the failure to pollers instead of leaving the job stuck.
        jobs_db[job_id] = {
            "status": "error",
            "message": str(e)
        }
147
+
148
@app.get("/")
def read_root():
    """Health-check endpoint confirming the API is up."""
    payload = {
        "status": "ok",
        "message": "Experience Eats Backend is running",
    }
    return payload
151
+
152
@app.post("/api/process-dish")
async def process_dish_photos(background_tasks: BackgroundTasks, files: list[UploadFile] = File(...)):
    """
    Receives 12 photos of a dish, saves them, and starts the 2.5D processing
    pipeline in the background.

    Returns immediately with a ``job_id``; the frontend polls
    ``/api/job-status/{job_id}`` for completion.

    Raises:
        HTTPException 400: wrong number of files, or a file is not an image.
    """
    if len(files) != 12:
        raise HTTPException(status_code=400, detail="Exactly 12 photos are required")

    # Generate common job ID and the per-job upload/processed directories.
    job_id = str(uuid.uuid4())
    job_upload_dir = os.path.join(UPLOAD_DIR, job_id)
    job_processed_dir = os.path.join(PROCESSED_DIR, job_id)
    os.makedirs(job_upload_dir, exist_ok=True)
    os.makedirs(job_processed_dir, exist_ok=True)

    files_data = []

    # Save uploaded files synchronously before passing to the background task
    # (UploadFile streams are closed when the request finishes).
    for i, file in enumerate(files):
        # Validate format. content_type can be None on malformed multipart
        # parts — guard before calling startswith to avoid an AttributeError.
        if not file.content_type or not file.content_type.startswith("image/"):
            raise HTTPException(status_code=400, detail=f"File {file.filename} is not an image")

        # filename can also be None; treat that as "no extension".
        file_extension = os.path.splitext(file.filename or "")[1]
        if not file_extension:
            file_extension = ".jpg"  # fallback

        # Server-generated name: the client filename is never used for the
        # path, which also rules out path-traversal tricks in uploaded names.
        safe_filename = f"angle_{i:02d}{file_extension}"
        input_file_path = os.path.join(job_upload_dir, safe_filename)

        with open(input_file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        files_data.append((safe_filename, input_file_path))

    # Give initial status so clients can poll immediately.
    jobs_db[job_id] = {"status": "pending"}

    # Send to background task; heavy work runs after the response is sent.
    background_tasks.add_task(process_photos_background, job_id, files_data, job_upload_dir, job_processed_dir)

    return {
        "status": "accepted",
        "job_id": job_id,
        "message": "Processing started in the background. Poll /api/job-status/{job_id} for completion."
    }
198
+
199
@app.get("/api/job-status/{job_id}")
def get_job_status(job_id: str):
    """
    Endpoint for the frontend to poll the status of a long-running 2.5D
    crop/depth job. Unknown job IDs yield a 404.
    """
    job = jobs_db.get(job_id)
    if job is None:
        raise HTTPException(status_code=404, detail="Job not found")
    return job
207
+
208
if __name__ == "__main__":
    # Local development entry point. In the Docker image the server is started
    # by the CMD (uvicorn on 0.0.0.0:7860) instead, so this block is unused there.
    import uvicorn
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# Web framework and ASGI server
fastapi==0.111.0
uvicorn==0.30.1
# Multipart form parsing for file uploads
python-multipart==0.0.9
# Outbound HTTP (remove.bg API) and .env loading
requests==2.32.3
python-dotenv==1.0.1
# Depth-estimation stack.
# NOTE(review): these three are unpinned — consider pinning exact versions
# for reproducible Docker builds.
torch
transformers
Pillow