import io
import json
import logging
import os
import re
import time
import traceback
import uuid
import wave
from datetime import datetime, timedelta
from io import BytesIO

import firebase_admin
import requests
from firebase_admin import credentials, db, storage, auth
from flask import Flask, request, jsonify, Response
from flask_cors import CORS
from google import genai
from google.genai import types
from PIL import Image
|
|
| |
| |
| |
|
|
| |
# Flask application with CORS enabled so the browser frontend can call the API.
app = Flask(__name__)
CORS(app)


# Initialize Firebase Admin from environment configuration. The service
# account JSON is passed whole in the FIREBASE variable; the database and
# storage endpoints come from Firebase_DB / Firebase_Storage. Startup aborts
# if any piece is missing — the app cannot function without Firebase.
try:
    credentials_json_string = os.environ.get("FIREBASE")
    if not credentials_json_string:
        raise ValueError("The FIREBASE environment variable is not set.")

    credentials_json = json.loads(credentials_json_string)
    firebase_db_url = os.environ.get("Firebase_DB")
    firebase_storage_bucket = os.environ.get("Firebase_Storage")

    if not firebase_db_url or not firebase_storage_bucket:
        raise ValueError("Firebase_DB and Firebase_Storage environment variables must be set.")

    cred = credentials.Certificate(credentials_json)
    firebase_admin.initialize_app(cred, {
        'databaseURL': firebase_db_url,
        'storageBucket': firebase_storage_bucket
    })
    print("Firebase Admin SDK initialized successfully.")
except Exception as e:
    print(f"FATAL: Error initializing Firebase: {e}")
    exit(1)


# Module-wide handles: the default storage bucket and the Realtime DB root.
bucket = storage.bucket()
db_ref = db.reference()
|
|
|
|
| |
# Initialize the Google GenAI client; the API key comes from the 'Gemini'
# environment variable. Startup aborts if it is missing.
try:
    api_key = os.environ.get("Gemini")
    if not api_key:
        raise ValueError("The 'Gemini' environment variable for the API key is not set.")

    client = genai.Client(api_key=api_key)
    print("Google GenAI Client initialized successfully.")
except Exception as e:
    print(f"FATAL: Error initializing GenAI Client: {e}")
    exit(1)


# Model routing: a fast text model for categorization, the image-capable
# model for plan/step generation, and a dedicated TTS model for narration.
CATEGORY_MODEL = "gemini-2.0-flash-exp"
GENERATION_MODEL = "gemini-2.0-flash-exp-image-generation"
TTS_MODEL = "gemini-2.5-flash-preview-tts"
|
|
|
|
| |
| |
| |
|
|
def verify_token(auth_header):
    """Return the Firebase uid for a valid 'Bearer <token>' header, else None."""
    if not auth_header or not auth_header.startswith('Bearer '):
        return None
    id_token = auth_header.split('Bearer ')[1]
    try:
        return auth.verify_id_token(id_token)['uid']
    except Exception as e:
        # Expired, malformed, or revoked tokens all land here; treat uniformly.
        print(f"Token verification failed: {e}")
        return None
|
|
def verify_admin(auth_header):
    """Return the caller's uid if they are an admin; raise PermissionError otherwise."""
    uid = verify_token(auth_header)
    if uid is None:
        raise PermissionError('Invalid or missing user token')

    profile = db_ref.child(f'users/{uid}').get()
    if not profile or not profile.get('is_admin', False):
        raise PermissionError('Admin access required')
    return uid
|
|
def upload_to_storage(data_bytes, destination_blob_name, content_type):
    """Write *data_bytes* to Firebase Storage at the given path and return its public URL."""
    target = bucket.blob(destination_blob_name)
    target.upload_from_string(data_bytes, content_type=content_type)
    # Objects are served directly to the browser, so they must be public.
    target.make_public()
    return target.public_url
|
|
def parse_numbered_steps(text):
    """Parse "N. description" lines from model output into step dicts.

    Returns a list of {"stepNumber": int, "text": str} in order of appearance.
    """
    steps = []
    # Prepend a newline so a numbered item on the very first line still matches.
    for number, description in re.findall(r"\n\s*(\d+)\.\s*(.*)", "\n" + text, re.MULTILINE):
        steps.append({"stepNumber": int(number), "text": description.strip()})
    return steps
|
|
def _convert_pcm_to_wav(pcm_data, sample_rate=24000, channels=1, sample_width=2):
    """Package raw PCM samples into an in-memory WAV (RIFF) file.

    Defaults match the Gemini TTS output: 24 kHz mono 16-bit.
    Returns the complete WAV file as bytes.
    """
    wav_buffer = io.BytesIO()
    with wave.open(wav_buffer, 'wb') as wav_file:
        wav_file.setnchannels(channels)
        wav_file.setsampwidth(sample_width)
        wav_file.setframerate(sample_rate)
        wav_file.writeframes(pcm_data)
    # getvalue() returns the whole buffer regardless of stream position.
    return wav_buffer.getvalue()
|
|
def generate_tts_audio_and_upload(text_to_speak, uid, project_id, step_num):
    """Generates audio using the exact method from the Streamlit app and uploads it.

    Returns the public URL of the uploaded WAV narration, or None on failure —
    TTS errors are logged and swallowed so one bad step does not abort a run.
    """
    try:
        # The prompt doubles as a style instruction: the model rephrases the
        # step text in a set persona before voicing it.
        response = client.models.generate_content(
            model=TTS_MODEL,
            contents=f"""You are an articulate AI assistant — confident and precise like Jarvis.Rephrase the instruction naturally using simple expert language.
Speak with a brisk, clear British accent.
Avoid reading word for word — explain it like you know it.
No quips or acknowledging the prompt just narrate this step:
{text_to_speak}""",
            config=types.GenerateContentConfig(
                response_modalities=["AUDIO"],
                speech_config=types.SpeechConfig(
                    voice_config=types.VoiceConfig(
                        prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name='Sadaltager')
                    )
                ),
            )
        )
        # The TTS model returns a single audio part as inline data.
        audio_part = response.candidates[0].content.parts[0]
        audio_data = audio_part.inline_data.data
        mime_type = audio_part.inline_data.mime_type

        # Raw PCM must be wrapped in a WAV container before browsers can play it.
        final_audio_bytes = _convert_pcm_to_wav(audio_data) if 'pcm' in mime_type else audio_data

        audio_path = f"users/{uid}/projects/{project_id}/narrations/step_{step_num}.wav"
        return upload_to_storage(final_audio_bytes, audio_path, 'audio/wav')
    except Exception as e:
        print(f"Error during TTS generation for step {step_num}: {e}")
        return None
|
|
def send_text_request(model_name, prompt, image):
    """Send a prompt plus image to a Gemini chat model; return its text reply, or None on error."""
    try:
        session = client.chats.create(model=model_name)
        reply = session.send_message([prompt, image])
        parts = reply.candidates[0].content.parts
        # Concatenate only the textual parts; image/other parts are ignored.
        text_chunks = [part.text for part in parts if hasattr(part, 'text')]
        return "".join(text_chunks).strip()
    except Exception as e:
        print(f"Error with model {model_name}: {e}")
        return None
|
|
|
|
# Module-level logger for request tracing. The `logging` import lives with
# the other imports at the top of the file (PEP 8: no mid-file imports).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
|
|
| |
| |
| |
@app.route('/api/image-proxy', methods=['GET'])
def image_proxy():
    """Proxy a Firebase Storage image through the backend.

    Accepts ?url=<storage.googleapis.com URL>, verifies it points at this
    app's own bucket, and streams the blob back with a 1-hour cache header.
    Restricting to our bucket prevents this endpoint becoming an open proxy.
    """
    image_url = request.args.get('url')
    logger.info(f"[IMAGE PROXY] Received URL: {image_url}")

    if not image_url:
        logger.error("[IMAGE PROXY] ERROR: URL parameter is missing")
        return jsonify({'error': 'URL parameter is missing.'}), 400

    try:
        # NOTE(review): substring check is permissive — a stricter hostname
        # parse via urllib.parse would be safer. Kept for compatibility; the
        # bucket-name comparison below is the real gate.
        if 'storage.googleapis.com' not in image_url:
            logger.error(f"[IMAGE PROXY] ERROR: Invalid Firebase Storage URL: {image_url}")
            return jsonify({'error': 'Invalid Firebase Storage URL.'}), 400

        logger.info(f"[IMAGE PROXY] Parsing URL: {image_url}")

        # Everything after the host is "<bucket>/<blob path>[?query]".
        url_parts = image_url.split('storage.googleapis.com/')[1]
        logger.info(f"[IMAGE PROXY] URL parts after split: {url_parts}")

        url_parts = url_parts.split('?')[0]
        logger.info(f"[IMAGE PROXY] URL parts after removing query params: {url_parts}")

        path_components = url_parts.split('/', 1)
        logger.info(f"[IMAGE PROXY] Path components: {path_components}")

        if len(path_components) < 2:
            logger.error(f"[IMAGE PROXY] ERROR: Invalid URL format - path_components: {path_components}")
            return jsonify({'error': 'Invalid URL format.'}), 400

        url_bucket_name = path_components[0]
        blob_path = path_components[1]

        logger.info(f"[IMAGE PROXY] Extracted bucket name: {url_bucket_name}")
        logger.info(f"[IMAGE PROXY] Extracted blob path: {blob_path}")

        # Only serve objects from our own bucket.
        expected_bucket_name = bucket.name
        logger.info(f"[IMAGE PROXY] Expected bucket name: {expected_bucket_name}")

        if url_bucket_name != expected_bucket_name:
            logger.error(f"[IMAGE PROXY] ERROR: Bucket name mismatch - URL: {url_bucket_name}, Expected: {expected_bucket_name}")
            return jsonify({'error': 'Bucket name mismatch.'}), 403

        logger.info(f"[IMAGE PROXY] Creating blob object for path: {blob_path}")
        blob = bucket.blob(blob_path)

        logger.info(f"[IMAGE PROXY] Checking if blob exists...")
        if not blob.exists():
            logger.error(f"[IMAGE PROXY] ERROR: Image not found at path: {blob_path}")
            return jsonify({'error': 'Image not found.'}), 404

        logger.info(f"[IMAGE PROXY] Downloading blob...")
        image_bytes = blob.download_as_bytes()
        content_type = blob.content_type or 'application/octet-stream'

        logger.info(f"[IMAGE PROXY] Successfully downloaded {len(image_bytes)} bytes, content-type: {content_type}")

        response = Response(image_bytes, content_type=content_type)
        response.headers['Cache-Control'] = 'public, max-age=3600'
        return response

    except IndexError as e:
        # split('storage.googleapis.com/')[1] raises when the host is a bare suffix.
        logger.error(f"[IMAGE PROXY] URL parsing IndexError: {e}")
        logger.error(f"[IMAGE PROXY] URL was: {image_url}")
        return jsonify({'error': 'Invalid URL format.'}), 400
    except Exception as e:
        logger.error(f"[IMAGE PROXY] Unexpected error: {e}")
        logger.error(f"[IMAGE PROXY] Error type: {type(e).__name__}")
        logger.error(f"[IMAGE PROXY] URL was: {image_url}")
        # Uses the module-level traceback import; the old local re-import was redundant.
        logger.error(f"[IMAGE PROXY] Full traceback: {traceback.format_exc()}")
        return jsonify({'error': 'Internal server error processing the image request.'}), 500
|
|
| |
| |
| |
|
|
@app.route('/api/auth/signup', methods=['POST'])
def signup():
    """Create a Firebase auth user plus a database profile with starter credits."""
    try:
        payload = request.get_json()
        email = payload.get('email')
        password = payload.get('password')
        if not email or not password:
            return jsonify({'error': 'Email and password are required'}), 400

        new_user = auth.create_user(email=email, password=password)
        profile = {
            'email': email,
            'credits': 15,
            'is_admin': False,
            'createdAt': datetime.utcnow().isoformat()
        }
        db_ref.child(f'users/{new_user.uid}').set(profile)
        return jsonify({'success': True, 'uid': new_user.uid, **profile}), 201
    except Exception as e:
        return jsonify({'error': str(e)}), 400
|
|
@app.route('/api/auth/social-signin', methods=['POST'])
def social_signin():
    """
    Ensures a user record exists in the Realtime Database after a social login
    (like Google Sign-In). The client should call this endpoint immediately
    after a successful Firebase authentication on their side, sending the
    Firebase ID Token. First-time users get a fresh profile with starter
    credits; returning users get their existing profile back unchanged.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if uid is None:
        return jsonify({'error': 'Invalid or expired token'}), 401

    profile_ref = db_ref.child(f'users/{uid}')
    existing_profile = profile_ref.get()

    if existing_profile:
        return jsonify({'uid': uid, **existing_profile}), 200

    try:
        # Pull the email from the Firebase auth record for the new profile.
        firebase_account = auth.get_user(uid)
        new_profile = {
            'email': firebase_account.email,
            'credits': 15,
            'is_admin': False,
            'createdAt': datetime.utcnow().isoformat()
        }
        profile_ref.set(new_profile)
        return jsonify({'success': True, 'uid': uid, **new_profile}), 201
    except Exception as e:
        print(f"Error creating profile for new social user {uid}: {e}")
        return jsonify({'error': f'Failed to create user profile: {str(e)}'}), 500
|
|
@app.route('/api/user/profile', methods=['GET'])
def get_user_profile():
    """Return the authenticated user's profile record."""
    uid = verify_token(request.headers.get('Authorization'))
    if uid is None:
        return jsonify({'error': 'Invalid or expired token'}), 401

    profile = db_ref.child(f'users/{uid}').get()
    if not profile:
        return jsonify({'error': 'User not found'}), 404

    return jsonify({'uid': uid, **profile})
|
|
| |
| |
| |
|
|
@app.route('/api/feedback', methods=['POST'])
def submit_feedback():
    """Record a feedback message from the authenticated user."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Invalid or expired token'}), 401

    try:
        payload = request.get_json()
        if not payload or not payload.get('message'):
            return jsonify({'error': 'Message is required'}), 400

        # Denormalize the email onto the record for easy admin review.
        profile = db_ref.child(f'users/{uid}').get() or {}
        entry_ref = db_ref.child('feedback').push()
        entry = {
            "feedbackId": entry_ref.key,
            "userId": uid,
            "userEmail": profile.get('email', 'unknown'),
            "type": payload.get('type', 'general'),
            "message": payload.get('message'),
            "createdAt": datetime.utcnow().isoformat(),
            "status": "open"
        }
        entry_ref.set(entry)
        return jsonify({"success": True, "feedbackId": entry_ref.key}), 201
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
|
@app.route('/api/user/request-credits', methods=['POST'])
def request_credits():
    """File a pending credit top-up request for an admin to review."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Invalid or expired token'}), 401

    try:
        payload = request.get_json()
        if not payload or 'requested_credits' not in payload:
            return jsonify({'error': 'requested_credits is required'}), 400

        new_request = db_ref.child('credit_requests').push()
        new_request.set({
            'requestId': new_request.key,
            'userId': uid,
            'requested_credits': payload['requested_credits'],
            'status': 'pending',
            'requestedAt': datetime.utcnow().isoformat()
        })
        return jsonify({'success': True, 'requestId': new_request.key})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
|
| |
| |
| |
|
|
@app.route('/api/admin/profile', methods=['GET'])
def get_admin_profile():
    """Return the admin's own profile plus aggregate dashboard statistics.

    Aggregates are computed in-process from full snapshots of the users,
    projects, feedback, and credit_requests tables — O(total data) per call,
    acceptable at small scale.
    """
    try:
        admin_uid = verify_admin(request.headers.get('Authorization'))

        # Full-table snapshots, defaulting to empty dicts when tables are absent.
        all_users = db_ref.child('users').get() or {}
        all_projects = db_ref.child('projects').get() or {}
        all_feedback = db_ref.child('feedback').get() or {}
        all_credit_requests = db_ref.child('credit_requests').get() or {}

        # --- User statistics ---
        total_users = len(all_users)
        admin_count = 0
        total_credits_in_system = 0
        new_users_last_7_days = 0
        seven_days_ago = datetime.utcnow() - timedelta(days=7)

        for user_data in all_users.values():
            if user_data.get('is_admin', False):
                admin_count += 1
            total_credits_in_system += user_data.get('credits', 0)

            try:
                created_at_str = user_data.get('createdAt')
                if created_at_str:
                    # Normalize a trailing 'Z' so fromisoformat accepts it, then
                    # drop tzinfo to compare against the naive UTC cutoff.
                    user_created_at = datetime.fromisoformat(created_at_str.replace('Z', '+00:00'))
                    if user_created_at.replace(tzinfo=None) > seven_days_ago:
                        new_users_last_7_days += 1
            except (ValueError, TypeError):
                # Malformed timestamps simply don't count toward the stat.
                pass

        # --- Project statistics ---
        total_projects = len(all_projects)
        # Pre-seed the known statuses so they appear in the payload even at zero.
        projects_by_status = {
            "awaiting_approval": 0,
            "awaiting_selection": 0,
            "ready": 0,
            "unknown": 0
        }
        projects_by_category = {}

        for project_data in all_projects.values():
            status = project_data.get('status', 'unknown')
            projects_by_status[status] = projects_by_status.get(status, 0) + 1

            category = project_data.get('category', 'N/A')
            projects_by_category[category] = projects_by_category.get(category, 0) + 1

        # --- System statistics ---
        open_feedback_count = sum(1 for fb in all_feedback.values() if fb.get('status') == 'open')
        pending_requests_count = sum(1 for req in all_credit_requests.values() if req.get('status') == 'pending')

        # The admin's personal record comes from the snapshot already fetched.
        admin_personal_data = all_users.get(admin_uid, {})

        response_data = {
            'uid': admin_uid,
            'email': admin_personal_data.get('email'),
            'credits': admin_personal_data.get('credits'),
            'is_admin': True,
            'dashboardStats': {
                'users': {
                    'total': total_users,
                    'admins': admin_count,
                    'regular': total_users - admin_count,
                    'newLast7Days': new_users_last_7_days,
                    'totalCreditsInSystem': total_credits_in_system
                },
                'projects': {
                    'total': total_projects,
                    'byStatus': projects_by_status,
                    'byCategory': projects_by_category
                },
                'system': {
                    'openFeedback': open_feedback_count,
                    'pendingCreditRequests': pending_requests_count
                }
            }
        }

        return jsonify(response_data), 200

    except PermissionError as e:
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        print(traceback.format_exc())
        return jsonify({'error': f"An internal error occurred: {e}"}), 500
|
|
@app.route('/api/admin/credit_requests', methods=['GET'])
def list_credit_requests():
    """Admin-only: list every credit request record."""
    try:
        verify_admin(request.headers.get('Authorization'))
        requests_data = db_ref.child('credit_requests').get() or {}
        return jsonify(list(requests_data.values()))
    except PermissionError as e:
        # Consistent with /api/admin/profile: auth failures are 403, not 500.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
|
@app.route('/api/admin/credit_requests/<string:request_id>', methods=['PUT'])
def process_credit_request(request_id):
    """Admin-only: approve or decline a pending credit request.

    Approving adds the requested credits to the requesting user's balance
    and stamps the request with the processing admin and timestamp.
    """
    try:
        admin_uid = verify_admin(request.headers.get('Authorization'))
        req_ref = db_ref.child(f'credit_requests/{request_id}')
        req_data = req_ref.get()
        if not req_data:
            return jsonify({'error': 'Credit request not found'}), 404

        # Guard against double-processing: re-approving an already-approved
        # request would grant the credits a second time.
        if req_data.get('status', 'pending') != 'pending':
            return jsonify({'error': 'Credit request already processed'}), 409

        decision = request.json.get('decision')
        if decision not in ['approved', 'declined']:
            return jsonify({'error': 'Decision must be "approved" or "declined"'}), 400

        if decision == 'approved':
            user_ref = db_ref.child(f'users/{req_data["userId"]}')
            user_data = user_ref.get()
            if user_data:
                new_total = user_data.get('credits', 0) + int(req_data.get('requested_credits', 0))
                user_ref.update({'credits': new_total})

        req_ref.update({'status': decision, 'processedBy': admin_uid, 'processedAt': datetime.utcnow().isoformat()})
        return jsonify({'success': True, 'message': f'Request {decision}.'})
    except PermissionError as e:
        # Consistent with /api/admin/profile: auth failures are 403, not 500.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
|
@app.route('/api/admin/feedback', methods=['GET'])
def admin_view_feedback():
    """Admin-only: list every feedback record."""
    try:
        verify_admin(request.headers.get('Authorization'))
        feedback_data = db_ref.child('feedback').get() or {}
        return jsonify(list(feedback_data.values()))
    except PermissionError as e:
        # Consistent with /api/admin/profile: auth failures are 403, not 500.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
|
@app.route('/api/admin/users', methods=['GET'])
def admin_list_users():
    """Admin-only: list every user profile, with its uid folded into each record."""
    try:
        verify_admin(request.headers.get('Authorization'))
        all_users = db_ref.child('users').get() or {}
        user_list = [{'uid': uid, **data} for uid, data in all_users.items()]
        return jsonify(user_list)
    except PermissionError as e:
        # Consistent with /api/admin/profile: auth failures are 403, not 500.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
|
@app.route('/api/admin/users/<string:uid>/credits', methods=['PUT'])
def admin_update_credits(uid):
    """Admin-only: add (or, with a negative value, subtract) credits for a user."""
    try:
        verify_admin(request.headers.get('Authorization'))
        add_credits = request.json.get('add_credits')
        if add_credits is None:
            return jsonify({'error': 'add_credits is required'}), 400

        user_ref = db_ref.child(f'users/{uid}')
        user_data = user_ref.get()
        if not user_data:
            return jsonify({'error': 'User not found'}), 404

        new_total = user_data.get('credits', 0) + float(add_credits)
        user_ref.update({'credits': new_total})
        return jsonify({'success': True, 'new_total_credits': new_total})
    except PermissionError as e:
        # Consistent with /api/admin/profile: auth failures are 403, not 500.
        return jsonify({'error': str(e)}), 403
    except (TypeError, ValueError):
        # float() failed: the client sent a non-numeric add_credits value.
        return jsonify({'error': 'add_credits must be a number'}), 400
    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
|
| |
| |
| |
| |
@app.route('/api/projects', methods=['POST'])
def create_project():
    """Create a project from an uploaded image plus optional context text.

    Costs 1 credit. Pipeline: categorize the image, generate a title /
    description / initial plan with Gemini, upload the image to Storage,
    persist the project record, then deduct the credit.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401

    user_ref = db_ref.child(f'users/{uid}')
    user_data = user_ref.get()
    if not user_data or user_data.get('credits', 0) < 1:
        return jsonify({'error': 'Insufficient credits'}), 402

    if 'image' not in request.files:
        return jsonify({'error': 'Image file is required'}), 400

    image_file = request.files['image']
    context_text = request.form.get('contextText', '')
    image_bytes = image_file.read()
    pil_image = Image.open(io.BytesIO(image_bytes))

    try:
        category_prompt = (
            "You are an expert DIY assistant. Analyze the user's image and context. "
            f"Context: '{context_text}'. "
            "Categorize the project into ONE of the following: "
            "Home Appliance Repair, Automotive Maintenance, Gardening & Urban Farming, "
            "Upcycling & Sustainable Crafts, or DIY Project Creation. "
            "Reply with ONLY the category name."
        )
        category = send_text_request(CATEGORY_MODEL, category_prompt, pil_image)
        if not category:
            return jsonify({'error': 'Failed to get project category from AI.'}), 500

        plan_prompt = f"""
You are an expert DIY assistant in the category: {category}.
User Context: "{context_text if context_text else 'No context provided.'}"
Based on the image and context, perform the following:
1. **Title:** Create a short, clear title for this project.
2. **Description:** Write a brief, one-paragraph description of the goal.
3. **Initial Plan:**
- If 'Upcycling & Sustainable Crafts' AND no specific project is mentioned, propose three distinct project options as a numbered list under "UPCYCLING OPTIONS:".
- For all other cases, briefly outline the main stages of the proposed solution.
Structure your response EXACTLY like this:
TITLE: [Your title]
DESCRIPTION: [Your description]
INITIAL PLAN:
[Your plan or 3 options]
"""
        plan_response = send_text_request(GENERATION_MODEL, plan_prompt, pil_image)
        if not plan_response:
            return jsonify({'error': 'Failed to generate project plan from AI.'}), 500

        # Guard every section match: a malformed AI reply previously raised
        # AttributeError on None.group(1) and surfaced as an opaque 500.
        title_match = re.search(r"TITLE:\s*(.*)", plan_response)
        # Bound the description at the INITIAL PLAN marker — the old greedy
        # DOTALL capture swallowed the whole plan into the description. Fall
        # back to the greedy form if the marker is missing entirely.
        description_match = (re.search(r"DESCRIPTION:\s*(.*?)\s*INITIAL PLAN:", plan_response, re.DOTALL)
                             or re.search(r"DESCRIPTION:\s*(.*)", plan_response, re.DOTALL))
        plan_match = re.search(r"INITIAL PLAN:\s*(.*)", plan_response, re.DOTALL)
        if not (title_match and description_match and plan_match):
            return jsonify({'error': 'AI response was not in the expected format.'}), 500

        title = title_match.group(1).strip()
        description = description_match.group(1).strip()
        initial_plan_text = plan_match.group(1).strip()

        # Upcycling replies may offer a numbered menu of options instead of a
        # single plan; detect that and set the workflow status accordingly.
        upcycling_options = re.findall(r"^\s*\d+\.\s*(.*)", initial_plan_text, re.MULTILINE) if "UPCYCLING OPTIONS:" in initial_plan_text else []
        initial_plan = initial_plan_text if not upcycling_options else ""
        status = "awaiting_selection" if upcycling_options else "awaiting_approval"

        project_id = str(uuid.uuid4())
        image_path = f"users/{uid}/projects/{project_id}/initial_image.png"
        image_url = upload_to_storage(image_bytes, image_path, content_type=image_file.content_type)

        project_data = {
            "uid": uid, "projectId": project_id, "status": status, "createdAt": datetime.utcnow().isoformat(),
            "userImageURL": image_url, "contextText": context_text, "projectTitle": title,
            "projectDescription": description, "category": category, "initialPlan": initial_plan,
            "upcyclingOptions": upcycling_options, "toolsList": [], "steps": []
        }
        db_ref.child(f'projects/{project_id}').set(project_data)

        # Deduct the credit only after the project has been fully created.
        user_ref.update({'credits': user_data.get('credits', 0) - 1})
        return jsonify(project_data), 201

    except Exception as e:
        print(traceback.format_exc())
        return jsonify({'error': f"An error occurred: {e}"}), 500
|
|
|
|
@app.route('/api/projects/<string:project_id>/approve', methods=['PUT'])
def approve_project_plan(project_id):
    """Approve a project's plan and generate the full build guide.

    Costs 5 credits. Re-sends the original image to Gemini to produce a
    tools list plus up to 5 illustrated steps, uploads each step's image
    and TTS narration to Storage, then marks the project 'ready'. Every
    phase is timed and logged for performance diagnosis.
    """
    start_time = time.time()
    logger.info(f"[PROJECT APPROVAL] Starting approval process for project: {project_id}")

    auth_start = time.time()
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        logger.error(f"[PROJECT APPROVAL] ERROR: Unauthorized access attempt for project: {project_id}")
        return jsonify({'error': 'Unauthorized'}), 401
    auth_time = time.time() - auth_start
    logger.info(f"[PROJECT APPROVAL] Authorization completed in {auth_time:.3f}s for user: {uid}")

    user_fetch_start = time.time()
    user_ref = db_ref.child(f'users/{uid}')
    user_data = user_ref.get()
    if not user_data or user_data.get('credits', 0) < 5:
        logger.error(f"[PROJECT APPROVAL] ERROR: Insufficient credits for user: {uid}, credits: {user_data.get('credits', 0) if user_data else 0}")
        return jsonify({'error': 'Insufficient credits'}), 402
    user_fetch_time = time.time() - user_fetch_start
    logger.info(f"[PROJECT APPROVAL] User data fetch completed in {user_fetch_time:.3f}s, credits: {user_data.get('credits', 0)}")

    project_fetch_start = time.time()
    project_ref = db_ref.child(f'projects/{project_id}')
    project_data = project_ref.get()
    if not project_data or project_data.get('uid') != uid:
        logger.error(f"[PROJECT APPROVAL] ERROR: Project not found or access denied - project_id: {project_id}, uid: {uid}")
        return jsonify({'error': 'Project not found or access denied'}), 404
    project_fetch_time = time.time() - project_fetch_start
    logger.info(f"[PROJECT APPROVAL] Project data fetch completed in {project_fetch_time:.3f}s for project: {project_data.get('projectTitle', 'Unknown')}")

    # Which upcycling option the user picked, if any (None for normal plans).
    selected_option = request.json.get('selectedOption')
    logger.info(f"[PROJECT APPROVAL] Selected option: {selected_option}")

    # Re-download the originally uploaded image to feed back to the model.
    image_download_start = time.time()
    response = requests.get(project_data['userImageURL'])
    image_download_time = time.time() - image_download_start
    logger.info(f"[PROJECT APPROVAL] Image download completed in {image_download_time:.3f}s, size: {len(response.content)} bytes")

    image_processing_start = time.time()
    pil_image = Image.open(io.BytesIO(response.content)).convert('RGB')
    image_processing_time = time.time() - image_processing_start
    logger.info(f"[PROJECT APPROVAL] Image processing completed in {image_processing_time:.3f}s")

    context_start = time.time()
    context = (
        f"The user chose the upcycling project: '{selected_option}'."
        if selected_option
        else f"The user has approved the plan for '{project_data['projectTitle']}'."
    )

    detailed_prompt = f"""
You are a DIY expert. The user wants to proceed with the project titled "{project_data['projectTitle']}".
{context}
Provide a detailed guide. For each step, you MUST provide a simple, clear illustrative image.
Format your response EXACTLY like this:

TOOLS AND MATERIALS:
- Tool A
- Material B

STEPS(Maximum 5 steps):
1. First step instructions.
2. Second step instructions...
"""
    context_time = time.time() - context_start
    logger.info(f"[PROJECT APPROVAL] Context preparation completed in {context_time:.3f}s")

    try:
        ai_start = time.time()
        logger.info(f"[PROJECT APPROVAL] Starting AI generation with model: {GENERATION_MODEL}")

        chat = client.chats.create(
            model=GENERATION_MODEL,
            config=types.GenerateContentConfig(response_modalities=["Text", "Image"])
        )
        full_resp = chat.send_message([detailed_prompt, pil_image])
        ai_time = time.time() - ai_start
        logger.info(f"[PROJECT APPROVAL] AI generation completed in {ai_time:.3f}s")

        # Split the interleaved response into its text and inline-image parts.
        parsing_start = time.time()
        gen_parts = full_resp.candidates[0].content.parts

        combined_text = ""
        inline_images = []
        for part in gen_parts:
            if part.text is not None:
                combined_text += part.text + "\n"
            if part.inline_data is not None:
                img = Image.open(io.BytesIO(part.inline_data.data)).convert('RGB')
                inline_images.append(img)

        combined_text = combined_text.strip()
        parsing_time = time.time() - parsing_start
        logger.info(f"[PROJECT APPROVAL] Response parsing completed in {parsing_time:.3f}s, found {len(inline_images)} images")

        extraction_start = time.time()
        tools_section = re.search(r"TOOLS AND MATERIALS:\s*(.*?)\s*STEPS:", combined_text, re.DOTALL).group(1).strip()
        steps_section = re.search(r"STEPS:\s*(.*)", combined_text, re.DOTALL).group(1).strip()

        tools_list = [line.strip("- ").strip() for line in tools_section.split('\n') if line.strip()]
        parsed_steps = parse_numbered_steps(steps_section)
        extraction_time = time.time() - extraction_start
        logger.info(f"[PROJECT APPROVAL] Text extraction completed in {extraction_time:.3f}s, tools: {len(tools_list)}, steps: {len(parsed_steps)}")

        # Each step needs exactly one illustration; bail out on any mismatch.
        if len(parsed_steps) != len(inline_images):
            logger.error(f"[PROJECT APPROVAL] ERROR: AI response mismatch - Steps: {len(parsed_steps)}, Images: {len(inline_images)}")
            return jsonify({'error': 'AI response mismatch: Steps and images do not match.'}), 500

        # Upload each step's image and narration, attaching their URLs.
        step_processing_start = time.time()
        final_steps = []
        total_upload_time = 0
        total_tts_time = 0

        for i, step_info in enumerate(parsed_steps):
            logger.info(f"[PROJECT APPROVAL] Processing step {i+1}/{len(parsed_steps)}")

            image_upload_start = time.time()
            img_byte_arr = io.BytesIO()
            inline_images[i].save(img_byte_arr, format='JPEG', optimize=True, quality=70)
            img_path = f"users/{uid}/projects/{project_id}/steps/step_{i+1}_image.jpg"
            img_url = upload_to_storage(img_byte_arr.getvalue(), img_path, 'image/jpeg')
            image_upload_time = time.time() - image_upload_start
            total_upload_time += image_upload_time
            logger.info(f"[PROJECT APPROVAL] Step {i+1} image upload completed in {image_upload_time:.3f}s")

            # Narration is best-effort: a None URL is stored on TTS failure.
            tts_start = time.time()
            narration_url = generate_tts_audio_and_upload(step_info['text'], uid, project_id, i + 1)
            tts_time = time.time() - tts_start
            total_tts_time += tts_time
            logger.info(f"[PROJECT APPROVAL] Step {i+1} TTS generation completed in {tts_time:.3f}s")

            step_info.update({
                "imageUrl": img_url,
                "narrationUrl": narration_url,
                "isDone": False,
                "notes": ""
            })
            final_steps.append(step_info)

        step_processing_time = time.time() - step_processing_start
        logger.info(f"[PROJECT APPROVAL] All steps processing completed in {step_processing_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] Total upload time: {total_upload_time:.3f}s, Total TTS time: {total_tts_time:.3f}s")

        db_update_start = time.time()
        update_data = {
            "status": "ready",
            "toolsList": tools_list,
            "steps": final_steps,
            "selectedOption": selected_option or ""
        }
        project_ref.update(update_data)
        db_update_time = time.time() - db_update_start
        logger.info(f"[PROJECT APPROVAL] Database update completed in {db_update_time:.3f}s")

        final_fetch_start = time.time()
        updated_project = project_ref.get()
        updated_project["projectId"] = project_id
        final_fetch_time = time.time() - final_fetch_start
        logger.info(f"[PROJECT APPROVAL] Final project fetch completed in {final_fetch_time:.3f}s")

        # Deduct credits only after everything above has succeeded.
        credits_update_start = time.time()
        user_ref.update({'credits': user_data.get('credits', 0) - 5})
        credits_update_time = time.time() - credits_update_start
        logger.info(f"[PROJECT APPROVAL] Credits update completed in {credits_update_time:.3f}s")

        total_time = time.time() - start_time
        logger.info(f"[PROJECT APPROVAL] SUCCESS: Project approval completed in {total_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] TIMING BREAKDOWN:")
        logger.info(f"[PROJECT APPROVAL] - Authorization: {auth_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - User fetch: {user_fetch_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Project fetch: {project_fetch_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Image download: {image_download_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Image processing: {image_processing_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Context prep: {context_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - AI generation: {ai_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Response parsing: {parsing_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Text extraction: {extraction_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Step processing: {step_processing_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Total uploads: {total_upload_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Total TTS: {total_tts_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - DB update: {db_update_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Final fetch: {final_fetch_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Credits update: {credits_update_time:.3f}s")

        return jsonify(updated_project)

    except Exception as e:
        total_time = time.time() - start_time
        logger.error(f"[PROJECT APPROVAL] ERROR: Exception occurred after {total_time:.3f}s: {e}")
        logger.error(f"[PROJECT APPROVAL] Error type: {type(e).__name__}")
        logger.error(f"[PROJECT APPROVAL] Project ID: {project_id}, User ID: {uid}")
        import traceback
        logger.error(f"[PROJECT APPROVAL] Full traceback: {traceback.format_exc()}")
        return jsonify({'error': str(e)}), 500
|
|
@app.route('/api/projects', methods=['GET'])
def list_projects():
    """List every project owned by the authenticated user."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    owned = db_ref.child('projects').order_by_child('uid').equal_to(uid).get() or {}
    return jsonify(list(owned.values()))
|
|
@app.route('/api/projects/<string:project_id>', methods=['GET'])
def get_project(project_id):
    """Fetch a single project, enforcing ownership."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    project = db_ref.child(f'projects/{project_id}').get()
    # Missing record and foreign record are indistinguishable to the caller.
    if not project or project.get('uid') != uid:
        return jsonify({'error': 'Project not found or access denied'}), 404
    return jsonify(project)
|
|
@app.route('/api/projects/<string:project_id>/step/<int:step_number>', methods=['PUT'])
def update_step(project_id, step_number):
    """Update the 'isDone' flag and/or 'notes' field of one project step."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    data = request.get_json()
    if data is None:
        return jsonify({'error': 'JSON body is required'}), 400

    project = db_ref.child(f'projects/{project_id}').get()
    if not project or project.get('uid') != uid:
        return jsonify({'error': 'Project not found or access denied'}), 404

    # Steps are stored as a list; find the index whose stepNumber matches.
    step_index = -1
    for idx, step in enumerate(project.get('steps', [])):
        if step.get('stepNumber') == step_number:
            step_index = idx
            break
    if step_index == -1:
        return jsonify({'error': f'Step number {step_number} not found'}), 404

    step_path = f'projects/{project_id}/steps/{step_index}'
    if 'isDone' in data:
        db_ref.child(f'{step_path}/isDone').set(bool(data['isDone']))
    if 'notes' in data:
        db_ref.child(f'{step_path}/notes').set(str(data['notes']))

    return jsonify({"success": True, "updatedStep": db_ref.child(step_path).get()})
|
|
@app.route('/api/projects/<string:project_id>', methods=['DELETE'])
def delete_project(project_id):
    """Delete a project record and every stored asset belonging to it."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401

    project_ref = db_ref.child(f'projects/{project_id}')
    project = project_ref.get()
    if not project or project.get('uid') != uid:
        return jsonify({'error': 'Project not found or access denied'}), 404

    project_ref.delete()
    # Remove all uploaded assets (images, narrations) under the project prefix.
    for stored_blob in bucket.list_blobs(prefix=f"users/{uid}/projects/{project_id}/"):
        stored_blob.delete()
    return jsonify({"success": True, "message": f"Project {project_id} deleted."})
|
|
|
|
| |
| |
| |
if __name__ == '__main__':
    # Dev entrypoint; PORT defaults to 7860 (Hugging Face Spaces convention).
    # NOTE(review): debug=True must not ship to production — it enables the
    # Werkzeug interactive debugger (remote code execution risk). Confirm the
    # production deployment uses a WSGI server, not this block.
    app.run(debug=True, host="0.0.0.0", port=int(os.environ.get("PORT", 7860)))