Spaces:
Sleeping
Sleeping
| import os | |
| import io | |
| import uuid | |
| import re | |
| import time | |
| import json | |
| import traceback | |
| import wave | |
| from datetime import datetime, timedelta | |
| from flask import Flask, request, jsonify, Response | |
| from flask_cors import CORS | |
| import firebase_admin | |
| from firebase_admin import credentials, db, storage, auth | |
| from PIL import Image | |
| from io import BytesIO | |
| import requests | |
| from elevenlabs import ElevenLabs | |
| # Import and configure Google GenAI, matching the Streamlit app | |
| from google import genai | |
| from google.genai import types | |
# -----------------------------------------------------------------------------
# 1. CONFIGURATION & INITIALIZATION
# -----------------------------------------------------------------------------
# Initialize Flask app and CORS
app = Flask(__name__)
CORS(app)
# --- Firebase Initialization ---
# Reads the service-account JSON from the FIREBASE env var and the database /
# storage targets from Firebase_DB / Firebase_Storage. The process exits
# immediately if any of these are missing — the app cannot run without them.
try:
    credentials_json_string = os.environ.get("FIREBASE")
    if not credentials_json_string:
        raise ValueError("The FIREBASE environment variable is not set.")
    credentials_json = json.loads(credentials_json_string)
    firebase_db_url = os.environ.get("Firebase_DB")
    firebase_storage_bucket = os.environ.get("Firebase_Storage")
    if not firebase_db_url or not firebase_storage_bucket:
        raise ValueError("Firebase_DB and Firebase_Storage environment variables must be set.")
    cred = credentials.Certificate(credentials_json)
    firebase_admin.initialize_app(cred, {
        'databaseURL': firebase_db_url,
        'storageBucket': firebase_storage_bucket
    })
    print("Firebase Admin SDK initialized successfully.")
except Exception as e:
    print(f"FATAL: Error initializing Firebase: {e}")
    exit(1)
# Initialize Firebase services
bucket = storage.bucket()  # default Storage bucket handle, used by upload/proxy helpers
db_ref = db.reference()    # root reference of the Realtime Database
# --- Google GenAI Client Initialization (as per Streamlit app) ---
# API key comes from the 'Gemini' env var; startup is fatal if it is absent.
try:
    api_key = os.environ.get("Gemini")
    if not api_key:
        raise ValueError("The 'Gemini' environment variable for the API key is not set.")
    client = genai.Client(api_key=api_key)
    print("Google GenAI Client initialized successfully.")
except Exception as e:
    print(f"FATAL: Error initializing GenAI Client: {e}")
    exit(1)
# --- Model Constants (as per Streamlit app) ---
CATEGORY_MODEL = "gemini-2.0-flash-exp"  # used by create_project for categorization
GENERATION_MODEL = "gemini-2.0-flash-exp-image-generation"  # used for plan generation
#TTS_MODEL = "gemini-2.5-flash-preview-tts"
| # ----------------------------------------------------------------------------- | |
| # 2. HELPER FUNCTIONS (Adapted directly from Streamlit App & Template) | |
| # ----------------------------------------------------------------------------- | |
def verify_token(auth_header):
    """Extract and verify a Firebase ID token from an Authorization header.

    Returns the authenticated user's uid, or None when the header is
    missing/malformed or token verification fails.
    """
    bearer_prefix = 'Bearer '
    if not auth_header or not auth_header.startswith(bearer_prefix):
        return None
    id_token = auth_header.split(bearer_prefix)[1]
    try:
        return auth.verify_id_token(id_token)['uid']
    except Exception as e:
        print(f"Token verification failed: {e}")
        return None
def verify_admin(auth_header):
    """Resolve the caller's uid and require the is_admin flag on their profile.

    Raises PermissionError when the token is invalid/missing or the user's
    database record lacks admin rights; returns the admin's uid otherwise.
    """
    uid = verify_token(auth_header)
    if not uid:
        raise PermissionError('Invalid or missing user token')
    profile = db_ref.child(f'users/{uid}').get()
    has_admin_flag = bool(profile) and profile.get('is_admin', False)
    if not has_admin_flag:
        raise PermissionError('Admin access required')
    return uid
def upload_to_storage(data_bytes, destination_blob_name, content_type):
    """Write bytes into Firebase Storage, make the object public, return its URL."""
    target = bucket.blob(destination_blob_name)
    target.upload_from_string(data_bytes, content_type=content_type)
    target.make_public()
    return target.public_url
def parse_numbered_steps(text):
    """Extract '1. do something' style lines from model output.

    Returns a list of {"stepNumber": int, "text": str} dicts, one per
    numbered line found, in order of appearance.
    """
    # A newline is prepended so a step on the very first line still matches.
    matches = re.findall(r"\n\s*(\d+)\.\s*(.*)", "\n" + text, re.MULTILINE)
    steps = []
    for number, description in matches:
        steps.append({"stepNumber": int(number), "text": description.strip()})
    return steps
| def _convert_pcm_to_wav(pcm_data, sample_rate=24000, channels=1, sample_width=2): | |
| """Wraps raw PCM audio data in a WAV container in memory.""" | |
| audio_buffer = io.BytesIO() | |
| with wave.open(audio_buffer, 'wb') as wf: | |
| wf.setnchannels(channels) | |
| wf.setsampwidth(sample_width) | |
| wf.setframerate(sample_rate) | |
| wf.writeframes(pcm_data) | |
| audio_buffer.seek(0) | |
| return audio_buffer.getvalue() | |
#Gemini tts implementation SOTA but slow
# NOTE(review): the block below is disabled by wrapping it in a module-level
# string literal (a no-op expression). It is kept for reference and depends on
# the commented-out TTS_MODEL constant above. The active implementation is the
# Deepgram-based function that follows.
'''
def generate_tts_audio_and_upload(text_to_speak, uid, project_id, step_num):
    """Generates audio using the exact method from the Streamlit app and uploads it."""
    try:
        response = client.models.generate_content(
            model=TTS_MODEL,
            contents=f"""You are an articulate AI assistant — confident and precise like Jarvis.Rephrase the instruction naturally using simple expert language.
Speak with a brisk, clear British accent.
Avoid reading word for word — explain it like you know it.
No quips or acknowledging the prompt just narrate this step:
{text_to_speak}""",
            config=types.GenerateContentConfig(
                response_modalities=["AUDIO"],
                speech_config=types.SpeechConfig(
                    voice_config=types.VoiceConfig(
                        prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name='Sadaltager')
                    )
                ),
            )
        )
        audio_part = response.candidates[0].content.parts[0]
        audio_data = audio_part.inline_data.data
        mime_type = audio_part.inline_data.mime_type
        final_audio_bytes = _convert_pcm_to_wav(audio_data) if 'pcm' in mime_type else audio_data
        audio_path = f"users/{uid}/projects/{project_id}/narrations/step_{step_num}.wav"
        return upload_to_storage(final_audio_bytes, audio_path, 'audio/wav')
    except Exception as e:
        print(f"Error during TTS generation for step {step_num}: {e}")
        return None
'''
# DeepGram faster and efficient
def generate_tts_audio_and_upload(text_to_speak, uid, project_id, step_num):
    """
    Generates audio using the Deepgram TTS API and uploads it to Firebase Storage.
    This is a drop-in replacement for the previous Google GenAI TTS function.

    Args:
        text_to_speak: narration text sent verbatim to Deepgram.
        uid: owner user id, used to build the storage path.
        project_id: project id, used to build the storage path.
        step_num: step number, used in the file name and log messages.

    Returns:
        The public URL of the uploaded MP3, or None on any failure
        (missing API key, HTTP error, timeout, or upload error).
    """
    try:
        # --- Step 1: Get the Deepgram API Key from environment variables ---
        api_key = os.environ.get("DEEPGRAM_API_KEY")
        if not api_key:
            print("FATAL: DEEPGRAM_API_KEY environment variable not set.")
            return None
        # --- Step 2: Define the API endpoint and headers ---
        # The model 'aura-2-draco-en' is specified as a query parameter in the URL.
        DEEPGRAM_URL = "https://api.deepgram.com/v1/speak?model=aura-2-draco-en"
        headers = {
            "Authorization": f"Token {api_key}",
            "Content-Type": "text/plain"  # As per Deepgram's requirement for this type of request
        }
        # --- Step 3: Make the API call to Deepgram ---
        # Deepgram expects the raw text as the request body, not in a JSON object.
        # FIX: a timeout is required — without one a stalled TTS call would hang
        # the request handler indefinitely. Timeouts surface as
        # requests.exceptions.RequestException and fall into the handler below.
        response = requests.post(
            DEEPGRAM_URL,
            headers=headers,
            data=text_to_speak.encode('utf-8'),
            timeout=60,
        )
        # Raise an exception for bad status codes (4xx or 5xx)
        response.raise_for_status()
        # The raw audio data is in the response content
        audio_data = response.content
        # --- Step 4: Upload the received audio to Firebase Storage ---
        # The output format from this Deepgram model is MP3 ('audio/mpeg').
        audio_path = f"users/{uid}/projects/{project_id}/narrations/step_{step_num}.mp3"
        return upload_to_storage(audio_data, audio_path, 'audio/mpeg')
    except requests.exceptions.RequestException as e:
        print(f"Error during Deepgram API call for step {step_num}: {e}")
        # Log the response body if available for more detailed error info
        if e.response is not None:
            print(f"Deepgram Error Response: {e.response.text}")
        return None
    except Exception as e:
        print(f"An unexpected error occurred during TTS generation for step {step_num}: {e}")
        return None
def send_text_request(model_name, prompt, image):
    """Send a prompt plus image to a Gemini chat model and return its text reply.

    Joins the text parts of the first candidate and strips whitespace.
    Returns None if the model call fails for any reason.
    """
    try:
        session = client.chats.create(model=model_name)
        reply = session.send_message([prompt, image])
        parts = reply.candidates[0].content.parts
        pieces = [part.text for part in parts if hasattr(part, 'text')]
        return "".join(pieces).strip()
    except Exception as e:
        print(f"Error with model {model_name}: {e}")
        return None
import logging
# Configure logging at the top of your file if not already done
# NOTE(review): module-level side effect — basicConfig is a no-op if the root
# logger was already configured before this module was imported.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# =============================================================================
# OPEN IMAGE PROXY ENDPOINT (NO AUTHENTICATION)
# =============================================================================
def image_proxy():
    """Proxy an object out of this app's Firebase Storage bucket.

    Query params:
        url: a public Storage URL of the form
             https://storage.googleapis.com/<bucket>/<path>[?...]
             (the shape produced by upload_to_storage's public URLs).

    The bucket embedded in the URL must match this app's own bucket (403
    otherwise). Responds with the raw object bytes and a 1-hour cache header.
    NOTE(review): no route decorator is visible in this chunk — presumably the
    URL rule is registered elsewhere; confirm.
    """
    image_url = request.args.get('url')
    logger.info(f"[IMAGE PROXY] Received URL: {image_url}")
    if not image_url:
        logger.error("[IMAGE PROXY] ERROR: URL parameter is missing")
        return jsonify({'error': 'URL parameter is missing.'}), 400
    try:
        # Parse Firebase Storage URL
        # Expected format: https://storage.googleapis.com/bucket-name/path/to/file.ext
        # NOTE(review): firebasestorage.googleapis.com-style download URLs are
        # rejected by this host check — confirm that is intended.
        if 'storage.googleapis.com' not in image_url:
            logger.error(f"[IMAGE PROXY] ERROR: Invalid Firebase Storage URL: {image_url}")
            return jsonify({'error': 'Invalid Firebase Storage URL.'}), 400
        logger.info(f"[IMAGE PROXY] Parsing URL: {image_url}")
        # Extract bucket name and blob path from the URL
        url_parts = image_url.split('storage.googleapis.com/')[1]
        logger.info(f"[IMAGE PROXY] URL parts after split: {url_parts}")
        # Remove query parameters if present
        url_parts = url_parts.split('?')[0]
        logger.info(f"[IMAGE PROXY] URL parts after removing query params: {url_parts}")
        # Split into bucket name and blob path
        path_components = url_parts.split('/', 1)
        logger.info(f"[IMAGE PROXY] Path components: {path_components}")
        if len(path_components) < 2:
            logger.error(f"[IMAGE PROXY] ERROR: Invalid URL format - path_components: {path_components}")
            return jsonify({'error': 'Invalid URL format.'}), 400
        url_bucket_name = path_components[0]
        blob_path = path_components[1]
        logger.info(f"[IMAGE PROXY] Extracted bucket name: {url_bucket_name}")
        logger.info(f"[IMAGE PROXY] Extracted blob path: {blob_path}")
        # Verify bucket name matches (optional security check)
        expected_bucket_name = bucket.name
        logger.info(f"[IMAGE PROXY] Expected bucket name: {expected_bucket_name}")
        if url_bucket_name != expected_bucket_name:
            logger.error(f"[IMAGE PROXY] ERROR: Bucket name mismatch - URL: {url_bucket_name}, Expected: {expected_bucket_name}")
            return jsonify({'error': 'Bucket name mismatch.'}), 403
        logger.info(f"[IMAGE PROXY] Creating blob object for path: {blob_path}")
        # Get the blob
        blob = bucket.blob(blob_path)
        logger.info(f"[IMAGE PROXY] Checking if blob exists...")
        if not blob.exists():
            logger.error(f"[IMAGE PROXY] ERROR: Image not found at path: {blob_path}")
            return jsonify({'error': 'Image not found.'}), 404
        logger.info(f"[IMAGE PROXY] Downloading blob...")
        # Download and return the image
        image_bytes = blob.download_as_bytes()
        content_type = blob.content_type or 'application/octet-stream'
        logger.info(f"[IMAGE PROXY] Successfully downloaded {len(image_bytes)} bytes, content-type: {content_type}")
        # Add cache headers for better performance
        response = Response(image_bytes, content_type=content_type)
        response.headers['Cache-Control'] = 'public, max-age=3600'  # Cache for 1 hour
        return response
    except IndexError as e:
        logger.error(f"[IMAGE PROXY] URL parsing IndexError: {e}")
        logger.error(f"[IMAGE PROXY] URL was: {image_url}")
        return jsonify({'error': 'Invalid URL format.'}), 400
    except Exception as e:
        # This will catch parsing errors or other unexpected issues.
        logger.error(f"[IMAGE PROXY] Unexpected error: {e}")
        logger.error(f"[IMAGE PROXY] Error type: {type(e).__name__}")
        logger.error(f"[IMAGE PROXY] URL was: {image_url}")
        import traceback
        logger.error(f"[IMAGE PROXY] Full traceback: {traceback.format_exc()}")
        return jsonify({'error': 'Internal server error processing the image request.'}), 500
# -----------------------------------------------------------------------------
# 3. AUTHENTICATION & USER MANAGEMENT (Corrected Version)
# -----------------------------------------------------------------------------
def signup():
    """
    Handles new user sign-up with email/password.
    ✅ FIX: Now accepts an optional 'displayName' and saves it.

    Creates the account in Firebase Auth, then mirrors a profile record (with
    15 starting credits) into the Realtime Database. Returns 201 with the new
    profile, 409 for duplicate emails, 400 otherwise.
    NOTE(review): if the database write fails after auth.create_user succeeds,
    an Auth account is left without a DB profile — confirm social_signin's
    profile-creation path covers that case.
    """
    try:
        data = request.get_json()
        email, password = data.get('email'), data.get('password')
        # Get the optional displayName
        display_name = data.get('displayName')  # Will be None if not provided
        if not email or not password:
            return jsonify({'error': 'Email and password are required'}), 400
        # Create the user in Firebase Authentication, including the displayName if available
        user = auth.create_user(
            email=email,
            password=password,
            display_name=display_name
        )
        # Create the corresponding user profile in the Realtime Database
        user_ref = db_ref.child(f'users/{user.uid}')
        user_data = {
            'email': email,
            'displayName': display_name,  # Save the name to the database
            'credits': 15,  # starting credit balance for new accounts
            'is_admin': False,
            'createdAt': datetime.utcnow().isoformat()
        }
        user_ref.set(user_data)
        logger.info(f"New user signed up: {user.uid}, Name: {display_name}")
        return jsonify({'success': True, 'uid': user.uid, **user_data}), 201
    except Exception as e:
        logger.error(f"Signup failed: {e}")
        # Provide a more specific error for existing users
        if 'EMAIL_EXISTS' in str(e):
            return jsonify({'error': 'An account with this email already exists.'}), 409
        return jsonify({'error': str(e)}), 400
def social_signin():
    """
    Ensures a user record exists and is up-to-date in the Realtime Database.
    ✅ IMPROVEMENT: Now backfills the displayName for existing social users
    if it's missing from the database.

    Returns 200 with the (possibly refreshed) profile for existing users, or
    201 with a freshly created profile (15 starting credits) on first social
    login. Backfill failures are logged and do not fail the request.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Invalid or expired token'}), 401
    user_ref = db_ref.child(f'users/{uid}')
    user_data = user_ref.get()
    if user_data:
        # User already exists. Check if their displayName is missing.
        if 'displayName' not in user_data or user_data['displayName'] is None:
            try:
                # The name is missing in our DB, let's sync it from Auth.
                firebase_user = auth.get_user(uid)
                if firebase_user.display_name:
                    logger.info(f"Backfilling missing displayName for existing user {uid}.")
                    user_ref.update({'displayName': firebase_user.display_name})
                    # Get the updated data to return to the client
                    user_data = user_ref.get()
            except Exception as e:
                logger.error(f"Could not backfill displayName for user {uid}: {e}")
        return jsonify({'uid': uid, **user_data}), 200
    else:
        # This is a new user (first social login), create their full profile.
        logger.info(f"New social user detected: {uid}. Creating database profile.")
        try:
            firebase_user = auth.get_user(uid)
            new_user_data = {
                'email': firebase_user.email,
                'displayName': firebase_user.display_name,
                'credits': 15,  # same starting balance as email/password signup
                'is_admin': False,
                'createdAt': datetime.utcnow().isoformat()
            }
            user_ref.set(new_user_data)
            logger.info(f"Successfully created profile for new social user: {uid}")
            return jsonify({'success': True, 'uid': uid, **new_user_data}), 201
        except Exception as e:
            logger.error(f"Error creating profile for new social user {uid}: {e}")
            return jsonify({'error': f'Failed to create user profile: {str(e)}'}), 500
def get_user_profile():
    """
    Retrieves the caller's profile from the Realtime Database.
    ✅ FIX: This now correctly includes the 'displayName' in the response.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Invalid or expired token'}), 401
    profile = db_ref.child(f'users/{uid}').get()
    if not profile:
        return jsonify({'error': 'User not found'}), 404
    return jsonify({'uid': uid, **profile})
def update_user_profile():
    """
    ✅ NEW: Allows a logged-in user to update their profile, specifically their displayName.

    The new name is written both to Firebase Auth and to the Realtime
    Database so the two stay in sync.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Invalid or expired token'}), 401
    payload = request.get_json()
    new_display_name = payload.get('displayName')
    # Reject missing, non-string, or whitespace-only names.
    if not (new_display_name and isinstance(new_display_name, str) and new_display_name.strip()):
        return jsonify({'error': 'A valid displayName is required.'}), 400
    try:
        # Step 1: Update the user record in Firebase Authentication
        auth.update_user(uid, display_name=new_display_name)
        # Step 2: Update the user profile in the Realtime Database
        db_ref.child(f'users/{uid}').update({'displayName': new_display_name})
        logger.info(f"User {uid} updated their displayName to '{new_display_name}'.")
        return jsonify({'success': True, 'message': 'Profile updated successfully.'}), 200
    except Exception as e:
        logger.error(f"Error updating profile for user {uid}: {e}")
        return jsonify({'error': f'Failed to update profile: {str(e)}'}), 500
# -----------------------------------------------------------------------------
# 4. FEEDBACK AND CREDIT REQUESTS (USER-FACING)
# -----------------------------------------------------------------------------
def submit_feedback():
    """Store a feedback message from the authenticated user (status 'open')."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Invalid or expired token'}), 401
    try:
        payload = request.get_json()
        if not payload or not payload.get('message'):
            return jsonify({'error': 'Message is required'}), 400
        profile = db_ref.child(f'users/{uid}').get() or {}
        entry = db_ref.child('feedback').push()
        record = {
            "feedbackId": entry.key,
            "userId": uid,
            "userEmail": profile.get('email', 'unknown'),
            "type": payload.get('type', 'general'),
            "message": payload.get('message'),
            "createdAt": datetime.utcnow().isoformat(),
            "status": "open",
        }
        entry.set(record)
        return jsonify({"success": True, "feedbackId": entry.key}), 201
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def request_credits():
    """Create a pending credit-request record for the authenticated user."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Invalid or expired token'}), 401
    try:
        payload = request.get_json()
        if not payload or 'requested_credits' not in payload:
            return jsonify({'error': 'requested_credits is required'}), 400
        entry = db_ref.child('credit_requests').push()
        entry.set({
            'requestId': entry.key,
            'userId': uid,
            'requested_credits': payload['requested_credits'],
            'status': 'pending',
            'requestedAt': datetime.utcnow().isoformat(),
        })
        return jsonify({'success': True, 'requestId': entry.key})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# -----------------------------------------------------------------------------
# 5. ADMIN ENDPOINTS
# -----------------------------------------------------------------------------
def get_admin_profile():
    """Admin dashboard endpoint: the admin's own profile plus system stats.

    Aggregates users, projects, feedback, and credit requests into a single
    'dashboardStats' payload. Returns 403 when the caller is not an admin.
    """
    try:
        admin_uid = verify_admin(request.headers.get('Authorization'))
        # Fetch all necessary data from Firebase in one go
        all_users = db_ref.child('users').get() or {}
        all_projects = db_ref.child('projects').get() or {}
        all_feedback = db_ref.child('feedback').get() or {}
        all_credit_requests = db_ref.child('credit_requests').get() or {}
        # --- User Statistics Calculation ---
        total_users = len(all_users)
        admin_count = 0
        total_credits_in_system = 0
        new_users_last_7_days = 0
        seven_days_ago = datetime.utcnow() - timedelta(days=7)
        for user_data in all_users.values():
            if user_data.get('is_admin', False):
                admin_count += 1
            # Credits are summed for every user, admin or not.
            total_credits_in_system += user_data.get('credits', 0)
            # Check for new users
            try:
                created_at_str = user_data.get('createdAt')
                if created_at_str:
                    # Accommodate different possible ISO formats
                    user_created_at = datetime.fromisoformat(created_at_str.replace('Z', '+00:00'))
                    # Compare as naive datetimes since seven_days_ago is naive.
                    if user_created_at.replace(tzinfo=None) > seven_days_ago:
                        new_users_last_7_days += 1
            except (ValueError, TypeError):
                # Ignore if date format is invalid or missing
                pass
        # --- Project Statistics Calculation ---
        total_projects = len(all_projects)
        projects_by_status = {
            "awaiting_approval": 0,
            "awaiting_selection": 0,
            "ready": 0,
            "unknown": 0
        }
        projects_by_category = {}
        for project_data in all_projects.values():
            # Tally by status
            status = project_data.get('status', 'unknown')
            projects_by_status[status] = projects_by_status.get(status, 0) + 1
            # Tally by category
            category = project_data.get('category', 'N/A')
            projects_by_category[category] = projects_by_category.get(category, 0) + 1
        # --- System Health Calculation ---
        open_feedback_count = sum(1 for fb in all_feedback.values() if fb.get('status') == 'open')
        pending_requests_count = sum(1 for req in all_credit_requests.values() if req.get('status') == 'pending')
        # Assemble the final response object
        admin_personal_data = all_users.get(admin_uid, {})
        response_data = {
            'uid': admin_uid,
            'email': admin_personal_data.get('email'),
            'credits': admin_personal_data.get('credits'),
            'is_admin': True,
            'dashboardStats': {
                'users': {
                    'total': total_users,
                    'admins': admin_count,
                    'regular': total_users - admin_count,
                    'newLast7Days': new_users_last_7_days,
                    'totalCreditsInSystem': total_credits_in_system
                },
                'projects': {
                    'total': total_projects,
                    'byStatus': projects_by_status,
                    'byCategory': projects_by_category
                },
                'system': {
                    'openFeedback': open_feedback_count,
                    'pendingCreditRequests': pending_requests_count
                }
            }
        }
        return jsonify(response_data), 200
    except PermissionError as e:
        return jsonify({'error': str(e)}), 403  # Use 403 Forbidden for permission issues
    except Exception as e:
        print(traceback.format_exc())
        return jsonify({'error': f"An internal error occurred: {e}"}), 500
def list_credit_requests():
    """Admin-only: return every credit request in the system.

    Returns 403 when the caller is not an admin, 500 on other errors.
    """
    try:
        verify_admin(request.headers.get('Authorization'))
        requests_data = db_ref.child('credit_requests').get() or {}
        return jsonify(list(requests_data.values()))
    except PermissionError as e:
        # FIX: consistent with get_admin_profile — permission failures are
        # 403 Forbidden, not 500.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def process_credit_request(request_id):
    """Admin-only: approve or decline a pending credit request.

    Body: {"decision": "approved" | "declined"}. On approval the requested
    credits are added to the requesting user's balance, then the request is
    stamped with the processing admin's uid and a timestamp.

    Returns 404 for unknown requests, 409 for already-processed requests,
    400 for a bad decision value, 403 for non-admin callers.
    """
    try:
        admin_uid = verify_admin(request.headers.get('Authorization'))
        req_ref = db_ref.child(f'credit_requests/{request_id}')
        req_data = req_ref.get()
        if not req_data:
            return jsonify({'error': 'Credit request not found'}), 404
        # FIX: guard against double-processing — without this, re-approving
        # the same request granted the credits a second time.
        if req_data.get('status', 'pending') != 'pending':
            return jsonify({'error': 'Credit request has already been processed'}), 409
        decision = request.json.get('decision')
        if decision not in ['approved', 'declined']:
            return jsonify({'error': 'Decision must be "approved" or "declined"'}), 400
        if decision == 'approved':
            user_ref = db_ref.child(f'users/{req_data["userId"]}')
            user_data = user_ref.get()
            if user_data:
                new_total = user_data.get('credits', 0) + int(req_data.get('requested_credits', 0))
                user_ref.update({'credits': new_total})
        req_ref.update({'status': decision, 'processedBy': admin_uid, 'processedAt': datetime.utcnow().isoformat()})
        return jsonify({'success': True, 'message': f'Request {decision}.'})
    except PermissionError as e:
        # FIX: consistent with get_admin_profile — permission failures are 403.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def admin_view_feedback():
    """Admin-only: return every feedback record in the system.

    Returns 403 when the caller is not an admin, 500 on other errors.
    """
    try:
        verify_admin(request.headers.get('Authorization'))
        feedback_data = db_ref.child('feedback').get() or {}
        return jsonify(list(feedback_data.values()))
    except PermissionError as e:
        # FIX: consistent with get_admin_profile — permission failures are
        # 403 Forbidden, not 500.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def admin_list_users():
    """Admin-only: return every user profile, each tagged with its uid.

    Returns 403 when the caller is not an admin, 500 on other errors.
    """
    try:
        verify_admin(request.headers.get('Authorization'))
        all_users = db_ref.child('users').get() or {}
        user_list = [{'uid': uid, **data} for uid, data in all_users.items()]
        return jsonify(user_list)
    except PermissionError as e:
        # FIX: consistent with get_admin_profile — permission failures are
        # 403 Forbidden, not 500.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def admin_update_credits(uid):
    """Admin-only: adjust a user's credit balance by a delta.

    Body: {"add_credits": <number>} — added to the user's current balance
    (negative values subtract). Returns the new total on success, 403 for
    non-admin callers, 404 for unknown users.
    """
    try:
        verify_admin(request.headers.get('Authorization'))
        add_credits = request.json.get('add_credits')
        if add_credits is None:
            return jsonify({'error': 'add_credits is required'}), 400
        user_ref = db_ref.child(f'users/{uid}')
        user_data = user_ref.get()
        if not user_data:
            return jsonify({'error': 'User not found'}), 404
        new_total = user_data.get('credits', 0) + float(add_credits)
        user_ref.update({'credits': new_total})
        return jsonify({'success': True, 'new_total_credits': new_total})
    except PermissionError as e:
        # FIX: consistent with get_admin_profile — permission failures are
        # 403 Forbidden, not 500.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        # A non-numeric add_credits lands here via float()'s ValueError.
        return jsonify({'error': str(e)}), 500
# -----------------------------------------------------------------------------
# 6. DIY PROJECT ENDPOINTS (Core Logic)
# -----------------------------------------------------------------------------
# (The project endpoints from the previous answer go here, unchanged)
def create_project():
    """Create a new DIY project from an uploaded image plus optional context.

    Flow: auth -> credit check (>= 1) -> AI categorization -> AI
    title/description/plan -> upload the source image -> persist the project
    record -> deduct 1 credit. Returns the new project record with 201.

    NOTE(review): the .group(1) calls below raise AttributeError when the
    model response deviates from the requested format; that is caught by the
    generic handler and surfaces as a 500.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    user_ref = db_ref.child(f'users/{uid}')
    user_data = user_ref.get()
    if not user_data or user_data.get('credits', 0) < 1:
        return jsonify({'error': 'Insufficient credits'}), 402
    if 'image' not in request.files:
        return jsonify({'error': 'Image file is required'}), 400
    image_file = request.files['image']
    context_text = request.form.get('contextText', '')
    image_bytes = image_file.read()
    pil_image = Image.open(io.BytesIO(image_bytes))
    try:
        category_prompt = (
            "You are an expert DIY assistant. Analyze the user's image and context. "
            f"Context: '{context_text}'. "
            "Categorize the project into ONE of the following: "
            "Home Appliance Repair, Automotive Maintenance, Gardening & Urban Farming, "
            "Upcycling & Sustainable Crafts, or DIY Project Creation. "
            "Reply with ONLY the category name."
        )
        category = send_text_request(CATEGORY_MODEL, category_prompt, pil_image)
        if not category:
            return jsonify({'error': 'Failed to get project category from AI.'}), 500
        plan_prompt = f"""
You are an expert DIY assistant in the category: {category}.
User Context: "{context_text if context_text else 'No context provided.'}"
Based on the image and context, perform the following:
1. **Title:** Create a short, clear title for this project.
2. **Description:** Write a brief, one-paragraph description of the goal.
3. **Initial Plan:**
- If 'Upcycling & Sustainable Crafts' AND no specific project is mentioned, propose three distinct project options as a numbered list under "UPCYCLING OPTIONS:".
- For all other cases, briefly outline the main stages of the proposed solution.
Structure your response EXACTLY like this:
TITLE: [Your title]
DESCRIPTION: [Your description]
INITIAL PLAN:
[Your plan or 3 options]
"""
        plan_response = send_text_request(GENERATION_MODEL, plan_prompt, pil_image)
        if not plan_response:
            return jsonify({'error': 'Failed to generate project plan from AI.'}), 500
        title = re.search(r"TITLE:\s*(.*)", plan_response).group(1).strip()
        # NOTE(review): with re.DOTALL this greedy capture includes everything
        # after DESCRIPTION: (including the INITIAL PLAN section) — confirm
        # this is intended.
        description = re.search(r"DESCRIPTION:\s*(.*)", plan_response, re.DOTALL).group(1).strip()
        initial_plan_text = re.search(r"INITIAL PLAN:\s*(.*)", plan_response, re.DOTALL).group(1).strip()
        upcycling_options = re.findall(r"^\s*\d+\.\s*(.*)", initial_plan_text, re.MULTILINE) if "UPCYCLING OPTIONS:" in initial_plan_text else []
        initial_plan = initial_plan_text if not upcycling_options else ""
        # Projects that offered options wait for the user to pick one.
        status = "awaiting_selection" if upcycling_options else "awaiting_approval"
        project_id = str(uuid.uuid4())
        image_path = f"users/{uid}/projects/{project_id}/initial_image.png"
        image_url = upload_to_storage(image_bytes, image_path, content_type=image_file.content_type)
        project_data = {
            "uid": uid, "projectId": project_id, "status": status, "createdAt": datetime.utcnow().isoformat(),
            "userImageURL": image_url, "contextText": context_text, "projectTitle": title,
            "projectDescription": description, "category": category, "initialPlan": initial_plan,
            "upcyclingOptions": upcycling_options, "toolsList": [], "steps": []
        }
        db_ref.child(f'projects/{project_id}').set(project_data)
        # NOTE(review): the credit read + write is not atomic; concurrent
        # requests could double-spend — consider a transaction.
        user_ref.update({'credits': user_data.get('credits', 0) - 1})
        return jsonify(project_data), 201
    except Exception as e:
        print(traceback.format_exc())
        return jsonify({'error': f"An error occurred: {e}"}), 500
| # -------------------------------------------------------- | |
def approve_project_plan(project_id):
    """Approve a drafted project plan and generate the full build guide.

    Flow: authenticate -> check credits (approval costs 5) -> fetch and
    validate the project -> ask the GenAI model for a tools list, numbered
    steps and one illustrative image per step -> upload each step image plus
    a TTS narration clip -> persist everything and deduct the credits.

    Returns the updated project JSON (including 'projectId') on success;
    401/402/404/409/400 for auth/validation failures, 500 when generation,
    parsing or upload fails.
    """
    start_time = time.time()
    logger.info(f"[PROJECT APPROVAL] ▶️ Begin process • project={project_id}")
    # ── 1. AUTH ────────────────────────────────────────────────
    auth_t0 = time.time()
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        logger.error(f"[PROJECT APPROVAL] ❌ Unauthorized • project={project_id}")
        return jsonify({'error': 'Unauthorized'}), 401
    auth_time = time.time() - auth_t0
    logger.info(f"[TIMING] auth={auth_time:.3f}s • uid={uid}")
    # ── 2. USER / CREDIT CHECK (approval costs 5 credits) ─────
    uc_t0 = time.time()
    user_ref = db_ref.child(f'users/{uid}')
    user_data = user_ref.get()
    credits = user_data.get('credits', 0) if user_data else 0
    if credits < 5:
        logger.error(f"[PROJECT APPROVAL] ❌ Insufficient credits • uid={uid} credits={credits}")
        return jsonify({'error': 'Insufficient credits'}), 402
    uc_time = time.time() - uc_t0
    logger.info(f"[TIMING] user_fetch={uc_time:.3f}s • credits={credits}")
    # ── 3. PROJECT FETCH (must exist and belong to the caller) ─
    pf_t0 = time.time()
    project_ref = db_ref.child(f'projects/{project_id}')
    project_data = project_ref.get()
    if not project_data or project_data.get('uid') != uid:
        logger.error(f"[PROJECT APPROVAL] ❌ Not found / access denied • project={project_id} uid={uid}")
        return jsonify({'error': 'Project not found or access denied'}), 404
    pf_time = time.time() - pf_t0
    logger.info(f"[TIMING] project_fetch={pf_time:.3f}s • title='{project_data.get('projectTitle','?')}'")
    # ── 4. DOUBLE-APPROVAL GUARD ──────────────────────────────
    if project_data.get('status') == 'ready':
        logger.warning(f"[PROJECT APPROVAL] ⏹ Already approved • project={project_id}")
        return jsonify({'error': 'Project is already approved'}), 409
    # ── 5. VALIDATE selectedOption ────────────────────────────
    val_t0 = time.time()
    # FIX: request.json fails on a missing/non-JSON body, which crashed on
    # .get(); tolerate an absent body instead (an option may not be needed).
    payload = request.get_json(silent=True) or {}
    selected_option = payload.get('selectedOption')
    upcycling_options = project_data.get('upcyclingOptions', [])
    if upcycling_options:  # choice required
        if not selected_option:
            logger.error(f"[PROJECT APPROVAL] ❌ Option required but missing • project={project_id}")
            return jsonify({'error': 'You must choose an option before approving.'}), 400
        if selected_option not in upcycling_options:
            logger.error(f"[PROJECT APPROVAL] ❌ Invalid option • chosen='{selected_option}'")
            return jsonify({'error': 'Invalid option selected.'}), 400
    else:  # no options at all
        selected_option = None
    val_time = time.time() - val_t0
    logger.info(f"[TIMING] validation={val_time:.3f}s • selected='{selected_option}' options={len(upcycling_options)}")
    try:
        # ── 6. IMAGE DOWNLOAD / PROCESSING ────────────────────
        img_dl_t0 = time.time()
        # FIX: a timeout prevents a hung download from blocking this worker
        # forever; raise_for_status turns an HTTP error into a clean 500
        # below instead of a confusing PIL decode error.
        resp = requests.get(project_data['userImageURL'], timeout=30)
        resp.raise_for_status()
        pil_image = Image.open(io.BytesIO(resp.content)).convert('RGB')
        img_dl_time = time.time() - img_dl_t0
        logger.info(f"[TIMING] image_download+proc={img_dl_time:.3f}s • bytes={len(resp.content)}")
        # ── 7. PROMPT + AI GENERATION ─────────────────────────
        prompt_t0 = time.time()
        context_line = (
            f"The user chose the upcycling project: '{selected_option}'."
            if selected_option
            else f"The user has approved the plan for '{project_data['projectTitle']}'."
        )
        detailed_prompt = f"""
You are a DIY expert. The user wants to proceed with the project titled "{project_data['projectTitle']}".
{context_line}
Provide a detailed guide. For each step, you MUST provide a simple, clear illustrative image.
TOOLS AND MATERIALS:
- Tool A
- Material B
STEPS(Maximum 5 steps):
1. First step instructions.
2. Second step instructions...
"""
        chat = client.chats.create(
            model=GENERATION_MODEL,
            config=types.GenerateContentConfig(response_modalities=["Text", "Image"])
        )
        full_resp = chat.send_message([detailed_prompt, pil_image])
        prompt_time = time.time() - prompt_t0
        logger.info(f"[TIMING] ai_generation={prompt_time:.3f}s")
        # ── 8. PARSE AI RESPONSE ──────────────────────────────
        parse_t0 = time.time()
        gen_parts = full_resp.candidates[0].content.parts
        combined_text, inline_images = "", []
        for part in gen_parts:
            if part.text:
                combined_text += part.text + "\n"
            if part.inline_data:
                inline_images.append(Image.open(io.BytesIO(part.inline_data.data)).convert('RGB'))
        # FIX: the old code called .group(1) directly on re.search results and
        # raised AttributeError whenever the model deviated from the template.
        # Also tolerate headers like "STEPS(Maximum 5 steps):" — which is what
        # the prompt itself demonstrates — instead of requiring a bare "STEPS:".
        tools_match = re.search(r"TOOLS AND MATERIALS:\s*(.*?)\s*STEPS[^:\n]*:", combined_text, re.DOTALL)
        steps_match = re.search(r"STEPS[^:\n]*:\s*(.*)", combined_text, re.DOTALL)
        if not tools_match or not steps_match:
            logger.error("[PROJECT APPROVAL] ❌ AI response missing TOOLS/STEPS sections")
            return jsonify({'error': 'AI response could not be parsed.'}), 500
        tools_section = tools_match.group(1).strip()
        steps_section = steps_match.group(1).strip()
        tools_list = [ln.strip("- ").strip() for ln in tools_section.split('\n') if ln.strip()]
        parsed_steps = parse_numbered_steps(steps_section)
        if len(parsed_steps) != len(inline_images):
            logger.error("[PROJECT APPROVAL] ❌ Steps/images mismatch")
            return jsonify({'error': 'AI response mismatch: Steps and images do not match.'}), 500
        parse_time = time.time() - parse_t0
        logger.info(f"[TIMING] parse_response={parse_time:.3f}s • steps={len(parsed_steps)} imgs={len(inline_images)}")
        # ── 9. STEP PROCESSING (uploads + TTS) ────────────────
        steps_t0 = time.time()
        total_upload, total_tts, final_steps = 0.0, 0.0, []
        for idx, step_info in enumerate(parsed_steps, 1):
            # image upload (JPEG-compressed to keep storage small)
            up_t0 = time.time()
            buf = io.BytesIO()
            inline_images[idx-1].save(buf, format='JPEG', optimize=True, quality=70)
            img_path = f"users/{uid}/projects/{project_id}/steps/step_{idx}_image.jpg"
            img_url = upload_to_storage(buf.getvalue(), img_path, 'image/jpeg')
            total_upload += time.time() - up_t0
            # TTS narration clip for the step's instruction text
            tts_t0 = time.time()
            narr_url = generate_tts_audio_and_upload(step_info['text'], uid, project_id, idx)
            total_tts += time.time() - tts_t0
            step_info.update({
                "imageUrl": img_url,
                "narrationUrl": narr_url,
                "isDone": False,
                "notes": ""
            })
            final_steps.append(step_info)
        steps_time = time.time() - steps_t0
        logger.info(f"[TIMING] steps_process={steps_time:.3f}s • uploads={total_upload:.3f}s tts={total_tts:.3f}s")
        # ── 10. DATABASE UPDATE (clears the options menu) ─────
        db_t0 = time.time()
        project_ref.update({
            "status": "ready",
            "selectedOption": selected_option or "",
            "upcyclingOptions": [],  # => gone forever
            "toolsList": tools_list,
            "steps": final_steps
        })
        db_time = time.time() - db_t0
        logger.info(f"[TIMING] db_update={db_time:.3f}s")
        # credits deduction — only after a fully successful generation
        credits_t0 = time.time()
        user_ref.update({'credits': credits - 5})
        credits_time = time.time() - credits_t0
        logger.info(f"[TIMING] credits_update={credits_time:.3f}s • new_credits={credits-5}")
        # ── 11. FINAL FETCH & RETURN ──────────────────────────
        final_fetch_t0 = time.time()
        updated_project = project_ref.get()
        updated_project["projectId"] = project_id
        fetch_time = time.time() - final_fetch_t0
        total_time = time.time() - start_time
        logger.info(
            f"[PROJECT APPROVAL] ✅ SUCCESS • total={total_time:.3f}s "
            f"(fetch_final={fetch_time:.3f}s)"
        )
        return jsonify(updated_project), 200
    except Exception as e:
        # FIX: previously any network/AI/upload failure propagated as an
        # unhandled exception; follow the file's established error pattern.
        logger.error(f"[PROJECT APPROVAL] ❌ Failure • project={project_id}: {e}")
        print(traceback.format_exc())
        return jsonify({'error': f"An error occurred: {e}"}), 500
def list_projects():
    """Return every project owned by the authenticated caller.

    FIX: the old implementation returned `.values()`, which drops the RTDB
    keys — clients had no way to reference a project in later calls. Each
    record is now enriched with its 'projectId', matching the shape that
    approve_project_plan returns.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    projects = db_ref.child('projects').order_by_child('uid').equal_to(uid).get() or {}
    return jsonify([{**data, 'projectId': pid} for pid, data in projects.items()])
def get_project(project_id):
    """Return a single project owned by the caller, including its id.

    404s both when the project does not exist and when it belongs to another
    user, so the endpoint does not leak which project ids exist.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    project_data = db_ref.child(f'projects/{project_id}').get()
    if not project_data or project_data.get('uid') != uid:
        return jsonify({'error': 'Project not found or access denied'}), 404
    # Consistency fix: approve_project_plan returns the record with a
    # 'projectId' key — give clients the same uniform shape here.
    project_data["projectId"] = project_id
    return jsonify(project_data)
def update_step(project_id, step_number):
    """Patch the 'isDone' flag and/or 'notes' text of one project step.

    Only the fields present in the JSON body are written; the updated step
    is re-read from the database and returned so the client sees the final
    stored state.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    payload = request.get_json()
    if payload is None:
        return jsonify({'error': 'JSON body is required'}), 400
    project = db_ref.child(f'projects/{project_id}').get()
    if not project or project.get('uid') != uid:
        return jsonify({'error': 'Project not found or access denied'}), 404
    # Locate the list index of the step whose stepNumber matches.
    target_index = -1
    for i, step in enumerate(project.get('steps', [])):
        if step.get('stepNumber') == step_number:
            target_index = i
            break
    if target_index == -1:
        return jsonify({'error': f'Step number {step_number} not found'}), 404
    step_path = f'projects/{project_id}/steps/{target_index}'
    if 'isDone' in payload:
        db_ref.child(f'{step_path}/isDone').set(bool(payload['isDone']))
    if 'notes' in payload:
        db_ref.child(f'{step_path}/notes').set(str(payload['notes']))
    return jsonify({"success": True, "updatedStep": db_ref.child(step_path).get()})
def delete_project(project_id):
    """Delete a project record and every file stored under its prefix."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    project_node = db_ref.child(f'projects/{project_id}')
    record = project_node.get()
    if not record or record.get('uid') != uid:
        return jsonify({'error': 'Project not found or access denied'}), 404
    # Remove the database entry first, then sweep all stored assets
    # (step images, narration clips) under the project's storage prefix.
    project_node.delete()
    prefix = f"users/{uid}/projects/{project_id}/"
    for stored_file in bucket.list_blobs(prefix=prefix):
        stored_file.delete()
    return jsonify({"success": True, "message": f"Project {project_id} deleted."})
| #------------------------ | |
| # AI phone call ElevenLabs | |
| #------------------------- | |
| import math | |
| # Fixed server code | |
| AGENT_ID = os.getenv("AGENT_ID", "agent_01jy2d4krmfkn9r7v7wdyqtjct") | |
| ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY") | |
def initiate_call(project_id):
    """
    Hand the client a signed WebSocket URL for the ElevenLabs ConvAI agent.

    Uses the official 'get-signed-url' endpoint — the only one guaranteed to
    work for authenticated agents.
    """
    logger.info(f"[INITIATE] Received request for project: {project_id}")
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    if not ELEVENLABS_API_KEY:
        logger.error("[INITIATE] ELEVENLABS_API_KEY is not set on the server.")
        return jsonify({'error': 'Server configuration error.'}), 500
    # Official endpoint per the ElevenLabs documentation.
    endpoint = f"https://api.elevenlabs.io/v1/convai/conversation/get-signed-url?agent_id={AGENT_ID}"
    try:
        api_resp = requests.get(endpoint, headers={"xi-api-key": ELEVENLABS_API_KEY}, timeout=15)
        api_resp.raise_for_status()  # surfaces 4xx/5xx as RequestException
        signed_url = api_resp.json().get("signed_url")
        if not signed_url:
            logger.error("[INITIATE] ElevenLabs response missing 'signed_url'.")
            return jsonify({'error': 'Failed to retrieve session URL from provider.'}), 502
        logger.info("[INITIATE] Successfully retrieved signed URL.")
        # The React SDK expects a JSON object keyed "signed_url".
        return jsonify({"signed_url": signed_url}), 200
    except requests.exceptions.RequestException as e:
        logger.error(f"[INITIATE] Error calling ElevenLabs API: {e}")
        return jsonify({'error': 'Could not connect to AI service provider.'}), 504
def test_agent():
    """
    Debug endpoint: checks that the configured agent exists and that a
    signed conversation URL can be obtained for it.
    """
    if not ELEVENLABS_API_KEY:
        return jsonify({'error': 'API key not set on server'}), 500
    auth_headers = {"xi-api-key": ELEVENLABS_API_KEY}
    report = {'agent_id': AGENT_ID, 'tests': {}}
    try:
        # Test 1: can the agent be found by its ID?
        lookup = requests.get(
            f"https://api.elevenlabs.io/v1/convai/agents/{AGENT_ID}",
            headers=auth_headers,
            timeout=10,
        )
        report['tests']['agent_check'] = {
            'status': lookup.status_code,
            'exists': lookup.ok
        }
        # Test 2 (the important one): can we fetch a signed URL for it?
        signed = requests.get(
            f"https://api.elevenlabs.io/v1/convai/conversation/get-signed-url?agent_id={AGENT_ID}",
            headers=auth_headers,
            timeout=10,
        )
        report['tests']['get_signed_url_check'] = {
            'status': signed.status_code,
            'url_received': 'signed_url' in signed.json() if signed.ok else False
        }
        return jsonify(report)
    except Exception as e:
        return jsonify({'error': str(e), 'agent_id': AGENT_ID})
def log_call_usage(project_id):
    """
    Calculates and deducts credits from a user's account in Firebase
    after a call is completed.

    Billing rule: 3 credits per started minute (duration rounded up);
    the stored balance is floored at zero.
    """
    logger.info(f"[LOGGING] Received usage log for project: {project_id}")
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    # FIX: get_json() returns None on a missing/non-JSON body, which crashed
    # on .get(); treat that as an invalid request instead.
    data = request.get_json(silent=True) or {}
    duration_seconds = data.get("durationSeconds")
    # FIX: bool is a subclass of int so `true` slipped through the type check,
    # and a NEGATIVE duration produced a negative cost, which max(0, ...)
    # below turned into a credit *increase* — reject both.
    if (duration_seconds is None
            or isinstance(duration_seconds, bool)
            or not isinstance(duration_seconds, (int, float))
            or duration_seconds < 0):
        return jsonify({'error': 'Invalid duration provided.'}), 400
    # Calculate credit cost (3 credits per minute, always rounded up)
    minutes = math.ceil(duration_seconds / 60)
    cost = minutes * 3
    logger.info(f"[LOGGING] User '{uid}' call duration: {duration_seconds:.2f}s, rounded to {minutes} minute(s). Cost: {cost} credits.")
    try:
        user_ref = db_ref.child(f'users/{uid}')
        user_data = user_ref.get()
        if user_data is None:
            logger.error(f"[LOGGING] User with UID '{uid}' not found in the database.")
            return jsonify({'error': 'User not found.'}), 404
        current_credits = user_data.get('credits', 0)
        new_credits = max(0, current_credits - cost)  # never store a negative balance
        user_ref.update({'credits': new_credits})
        logger.info(f"[LOGGING] Successfully updated credits for user '{uid}'. Old: {current_credits}, New: {new_credits}")
        return jsonify({
            "status": "success",
            "creditsDeducted": cost,
            "remainingCredits": new_credits
        }), 200
    except Exception as e:
        logger.error(f"[LOGGING] A database error occurred for user '{uid}': {e}")
        return jsonify({'error': 'A server error occurred while updating credits.'}), 500
| # ----------------------------------------------------------------------------- | |
| # 7. MAIN EXECUTION | |
| # ----------------------------------------------------------------------------- | |
| if __name__ == '__main__': | |
| app.run(debug=True, host="0.0.0.0", port=int(os.environ.get("PORT", 7860))) |