# NOTE: removed "Spaces: Running Running" — Hugging Face Spaces UI status
# banner accidentally pasted into the source; not part of the program.
| import os | |
| import io | |
| import uuid | |
| import re | |
| import time | |
| import json | |
| import traceback | |
| import wave | |
| from datetime import datetime, timedelta | |
| from flask import Flask, request, jsonify, Response | |
| from flask_cors import CORS | |
| import firebase_admin | |
| from firebase_admin import credentials, db, storage, auth | |
| from PIL import Image | |
| from io import BytesIO | |
| import requests | |
| from elevenlabs import ElevenLabs | |
| # Import and configure Google GenAI, matching the Streamlit app | |
| from google import genai | |
| from google.genai import types | |
| import stripe | |
# -----------------------------------------------------------------------------
# 1. CONFIGURATION & INITIALIZATION
# -----------------------------------------------------------------------------
# Initialize Flask app and CORS.
# CORS is enabled for all routes/origins so the separate browser frontend
# (sozofix.tech) can call this API directly.
app = Flask(__name__)
CORS(app)
# --- Firebase Initialization ---
# Service-account credentials arrive as a JSON string in the FIREBASE env var;
# Firebase_DB / Firebase_Storage select the Realtime Database URL and the
# Storage bucket. All three are required — startup aborts if any is missing.
try:
    credentials_json_string = os.environ.get("FIREBASE")
    if not credentials_json_string:
        raise ValueError("The FIREBASE environment variable is not set.")
    credentials_json = json.loads(credentials_json_string)
    firebase_db_url = os.environ.get("Firebase_DB")
    firebase_storage_bucket = os.environ.get("Firebase_Storage")
    if not firebase_db_url or not firebase_storage_bucket:
        raise ValueError("Firebase_DB and Firebase_Storage environment variables must be set.")
    cred = credentials.Certificate(credentials_json)
    firebase_admin.initialize_app(cred, {
        'databaseURL': firebase_db_url,
        'storageBucket': firebase_storage_bucket
    })
    print("Firebase Admin SDK initialized successfully.")
except Exception as e:
    # Firebase backs every endpoint (auth, DB, storage) — fail fast at startup.
    print(f"FATAL: Error initializing Firebase: {e}")
    exit(1)
# Initialize Firebase services
bucket = storage.bucket()  # default Storage bucket (images, narrations)
db_ref = db.reference()    # root reference of the Realtime Database
# --- Google GenAI Client Initialization (as per Streamlit app) ---
try:
    # NOTE(review): the env var is named simply "Gemini" — confirm deployment
    # config uses exactly this key.
    api_key = os.environ.get("Gemini")
    if not api_key:
        raise ValueError("The 'Gemini' environment variable for the API key is not set.")
    client = genai.Client(api_key=api_key)
    print("Google GenAI Client initialized successfully.")
except Exception as e:
    # All AI features depend on this client; abort startup if unavailable.
    print(f"FATAL: Error initializing GenAI Client: {e}")
    exit(1)
# --- Model Constants (as per Streamlit app) ---
CATEGORY_MODEL = "gemini-2.5-flash"  # fast text model used for project categorization
GENERATION_MODEL = "gemini-2.0-flash-exp-image-generation"  # plan/step generation
#GENERATION_MODEL = "gemini-2.5-flash-image-preview"
#TTS_MODEL = "gemini-2.5-flash-preview-tts"
# Stripe
# --- Stripe Initialization ---
STRIPE_SECRET_KEY = os.environ.get("STRIPE_SECRET_KEY")
STRIPE_WEBHOOK_SECRET = os.environ.get("STRIPE_WEBHOOK_SECRET")
# Price IDs from Stripe dashboard (price_xxx...) – set these in your env
STRIPE_PRICE_FIXER = os.environ.get("STRIPE_PRICE_FIXER")  # "The Fixer" (standard)
STRIPE_PRICE_PRO = os.environ.get("STRIPE_PRICE_PRO")  # "The Pro" (premium)
# Frontend URLs for redirect after Checkout
STRIPE_SUCCESS_URL = os.environ.get(
    "STRIPE_SUCCESS_URL",
    "https://sozofix.tech/billing/success"
)
STRIPE_CANCEL_URL = os.environ.get(
    "STRIPE_CANCEL_URL",
    "https://sozofix.tech/billing/cancel"
)
if STRIPE_SECRET_KEY:
    stripe.api_key = STRIPE_SECRET_KEY
else:
    # Deliberately non-fatal: the app stays bootable without billing so the
    # non-Stripe endpoints keep working.
    print("WARNING: STRIPE_SECRET_KEY is not set – Stripe endpoints will fail.")
| # ----------------------------------------------------------------------------- | |
| # 2. HELPER FUNCTIONS (Adapted directly from Streamlit App & Template) | |
| # ----------------------------------------------------------------------------- | |
def verify_token(auth_header):
    """Validate a Firebase ID token from an ``Authorization: Bearer …`` header.

    Returns the token's uid on success, or None when the header is absent,
    malformed, or the token fails verification.
    """
    if not auth_header or not auth_header.startswith('Bearer '):
        return None
    _, _, id_token = auth_header.partition('Bearer ')
    try:
        return auth.verify_id_token(id_token)['uid']
    except Exception as e:
        # Expired/forged tokens land here; callers treat None as "unauthorized".
        print(f"Token verification failed: {e}")
        return None
def verify_admin(auth_header):
    """Resolve the caller's uid and require the ``is_admin`` flag on their profile.

    Raises PermissionError when the token is invalid/missing or the user is
    not an admin; returns the admin's uid otherwise.
    """
    uid = verify_token(auth_header)
    if uid is None:
        raise PermissionError('Invalid or missing user token')
    profile = db_ref.child(f'users/{uid}').get()
    if not (profile and profile.get('is_admin', False)):
        raise PermissionError('Admin access required')
    return uid
def upload_to_storage(data_bytes, destination_blob_name, content_type):
    """Write bytes to Firebase Storage at the given path and return a public URL."""
    target = bucket.blob(destination_blob_name)
    target.upload_from_string(data_bytes, content_type=content_type)
    # Generated assets (images, narrations) are served directly to clients,
    # so the blob is made world-readable.
    target.make_public()
    return target.public_url
def parse_numbered_steps(text):
    """Extract ``N. description`` lines from model output.

    Returns a list of {"stepNumber": int, "text": str} dicts, in the order the
    steps appear. A newline is prepended so a step on the very first line of
    *text* still matches.
    """
    matches = re.findall(r"\n\s*(\d+)\.\s*(.*)", "\n" + text, re.MULTILINE)
    return [
        {"stepNumber": int(number), "text": body.strip()}
        for number, body in matches
    ]
| def _convert_pcm_to_wav(pcm_data, sample_rate=24000, channels=1, sample_width=2): | |
| """Wraps raw PCM audio data in a WAV container in memory.""" | |
| audio_buffer = io.BytesIO() | |
| with wave.open(audio_buffer, 'wb') as wf: | |
| wf.setnchannels(channels) | |
| wf.setsampwidth(sample_width) | |
| wf.setframerate(sample_rate) | |
| wf.writeframes(pcm_data) | |
| audio_buffer.seek(0) | |
| return audio_buffer.getvalue() | |
# Gemini TTS implementation — SOTA quality but slow. Retained for reference
# only; the active Deepgram implementation follows below.
# NOTE(review): this is a bare module-level string literal (dead code that is
# never executed); consider deleting it or moving it to version control history.
'''
def generate_tts_audio_and_upload(text_to_speak, uid, project_id, step_num):
    """Generates audio using the exact method from the Streamlit app and uploads it."""
    try:
        response = client.models.generate_content(
            model=TTS_MODEL,
            contents=f"""You are an articulate AI assistant — confident and precise like Jarvis.Rephrase the instruction naturally using simple expert language.
Speak with a brisk, clear British accent.
Avoid reading word for word — explain it like you know it.
No quips or acknowledging the prompt just narrate this step:
{text_to_speak}""",
            config=types.GenerateContentConfig(
                response_modalities=["AUDIO"],
                speech_config=types.SpeechConfig(
                    voice_config=types.VoiceConfig(
                        prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name='Sadaltager')
                    )
                ),
            )
        )
        audio_part = response.candidates[0].content.parts[0]
        audio_data = audio_part.inline_data.data
        mime_type = audio_part.inline_data.mime_type
        final_audio_bytes = _convert_pcm_to_wav(audio_data) if 'pcm' in mime_type else audio_data
        audio_path = f"users/{uid}/projects/{project_id}/narrations/step_{step_num}.wav"
        return upload_to_storage(final_audio_bytes, audio_path, 'audio/wav')
    except Exception as e:
        print(f"Error during TTS generation for step {step_num}: {e}")
        return None
'''
# DeepGram faster and efficient
def generate_tts_audio_and_upload(text_to_speak, uid, project_id, step_num):
    """Narrate one instruction step via the Deepgram TTS API and upload the MP3.

    Drop-in replacement for the earlier Google GenAI TTS function. Returns the
    public Storage URL of the narration, or None on any failure (missing API
    key, HTTP error, upload error).
    """
    dg_key = os.environ.get("DEEPGRAM_API_KEY")
    if not dg_key:
        print("FATAL: DEEPGRAM_API_KEY environment variable not set.")
        return None
    try:
        # Deepgram takes the raw UTF-8 text as the request body (not JSON);
        # the voice model is selected via the query string.
        reply = requests.post(
            "https://api.deepgram.com/v1/speak?model=aura-2-draco-en",
            headers={
                "Authorization": f"Token {dg_key}",
                "Content-Type": "text/plain",
            },
            data=text_to_speak.encode('utf-8'),
        )
        # Surface 4xx/5xx responses as RequestException below.
        reply.raise_for_status()
        # This Deepgram model emits MP3; store it alongside the project and
        # serve it with the matching 'audio/mpeg' MIME type.
        destination = f"users/{uid}/projects/{project_id}/narrations/step_{step_num}.mp3"
        return upload_to_storage(reply.content, destination, 'audio/mpeg')
    except requests.exceptions.RequestException as e:
        print(f"Error during Deepgram API call for step {step_num}: {e}")
        if e.response is not None:
            print(f"Deepgram Error Response: {e.response.text}")
        return None
    except Exception as e:
        print(f"An unexpected error occurred during TTS generation for step {step_num}: {e}")
        return None
def send_text_request(model_name, prompt, image):
    """Send a prompt + image to a GenAI chat model and return its text reply.

    Joins the text of every text-bearing part of the first candidate; returns
    None (after logging) on any API or parsing failure.
    """
    try:
        session = client.chats.create(model=model_name)
        reply = session.send_message([prompt, image])
        parts = reply.candidates[0].content.parts
        combined = "".join(p.text for p in parts if hasattr(p, 'text'))
        return combined.strip()
    except Exception as e:
        print(f"Error with model {model_name}: {e}")
        return None
# NOTE(review): this import sits mid-file; the top-of-file import block is its
# conventional home.
import logging
# Configure logging at the top of your file if not already done
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
## Stripe
# Stripe plan config.
# Maps the plan key carried in Stripe metadata to its Checkout price id, the
# monthly credit allowance granted on renewal, and a human-readable label.
# "standard"/"fixer" and "premium"/"pro" are aliases for the same two plans.
PLAN_CONFIG = {
    # The Fixer – standard
    "standard": {
        "price_id": STRIPE_PRICE_FIXER,
        "credits": 200,
        "label": "The Fixer",
    },
    "fixer": {
        "price_id": STRIPE_PRICE_FIXER,
        "credits": 200,
        "label": "The Fixer",
    },
    # The Pro – premium
    "premium": {
        "price_id": STRIPE_PRICE_PRO,
        "credits": 500,
        "label": "The Pro",
    },
    "pro": {
        "price_id": STRIPE_PRICE_PRO,
        "credits": 500,
        "label": "The Pro",
    },
}
def get_or_create_stripe_customer(uid: str) -> str:
    """Return the Stripe customer id for a Firebase user, creating one on demand.

    The id is cached at /users/{uid}/stripeCustomerId so each Firebase user
    maps to exactly one Stripe customer.
    """
    profile_ref = db_ref.child(f'users/{uid}')
    profile = profile_ref.get() or {}
    customer_id = profile.get("stripeCustomerId")
    if customer_id:
        return customer_id
    # No customer yet — create one, tagged with the Firebase uid so webhook
    # events can be mapped back to the user.
    new_customer = stripe.Customer.create(
        email=profile.get("email"),
        metadata={"firebase_uid": uid},
    )
    profile_ref.update({"stripeCustomerId": new_customer.id})
    return new_customer.id
def reset_plan_credits(uid: str, plan_key: str):
    """Overwrite the user's credit balance with the plan's monthly allowance.

    HARD RESET — there is NO rollover:
      - standard / fixer -> 200
      - premium / pro    -> 500
    Whatever balance remained is discarded. Unknown plan keys are logged and
    ignored.
    """
    plan_cfg = PLAN_CONFIG.get(plan_key)
    if plan_cfg is None:
        logger.error(f"[STRIPE] Unknown plan '{plan_key}' for user {uid}")
        return
    monthly_credits = float(plan_cfg["credits"])
    user_ref = db_ref.child(f'users/{uid}')
    previous = (user_ref.get() or {}).get('credits', 0)
    user_ref.update({
        "credits": monthly_credits,
        "lastCreditResetAt": datetime.utcnow().isoformat(),
        "creditResetPlan": plan_key,
    })
    logger.info(
        f"[STRIPE] RESET credits for user {uid} to {monthly_credits} "
        f"for plan '{plan_key}' (no rollover). Previous balance: {previous}"
    )
def update_user_subscription_from_stripe(subscription: dict):
    """Mirror a Stripe subscription object into the Firebase user document.

    Writes: currentPlan, currentPlanLabel, currentPlanMonthlyCredits,
    planStatus, planCurrentPeriodEnd (ISO), stripeSubscriptionId,
    planUpdatedAt.

    The subscription's metadata must carry ``firebase_uid`` and ``plan``;
    without them the sync is logged and skipped.
    """
    meta = subscription.get("metadata") or {}
    uid = meta.get("firebase_uid")
    plan_key = meta.get("plan")
    if not (uid and plan_key):
        logger.error("[STRIPE] Subscription missing firebase_uid or plan in metadata.")
        return
    cfg = PLAN_CONFIG.get(plan_key, {})
    # current_period_end is a Unix timestamp (seconds) when present.
    period_end = subscription.get("current_period_end")
    expiry_iso = (
        datetime.utcfromtimestamp(period_end).isoformat() + "Z"
        if period_end else None
    )
    status = subscription.get("status")  # active / past_due / canceled / unpaid / trialing ...
    logger.info(
        f"[STRIPE] Updating subscription for user {uid}: "
        f"plan={plan_key}, status={status}, expires={expiry_iso}"
    )
    db_ref.child(f'users/{uid}').update({
        "currentPlan": plan_key,                       # "standard" / "premium"
        "currentPlanLabel": cfg.get("label", plan_key.title()),
        "currentPlanMonthlyCredits": cfg.get("credits", 0),
        "planStatus": status,
        "planCurrentPeriodEnd": expiry_iso,
        "stripeSubscriptionId": subscription.get("id"),
        "planUpdatedAt": datetime.utcnow().isoformat(),
    })
| # ============================================================================= | |
| # OPEN IMAGE PROXY ENDPOINT (NO AUTHENTICATION) | |
| # ============================================================================= | |
def image_proxy():
    """Unauthenticated proxy that streams an image out of this app's Storage bucket.

    Query param ``url``: a https://storage.googleapis.com/<bucket>/<path> link.
    Only blobs in this app's own bucket are served (bucket-name check); the
    response carries a one-hour Cache-Control header.

    Returns 400 for missing/malformed URLs, 403 for foreign buckets, 404 for
    missing blobs, 500 for anything unexpected.
    """
    image_url = request.args.get('url')
    logger.info(f"[IMAGE PROXY] Received URL: {image_url}")
    if not image_url:
        logger.error("[IMAGE PROXY] ERROR: URL parameter is missing")
        return jsonify({'error': 'URL parameter is missing.'}), 400
    try:
        # Parse Firebase Storage URL
        # Expected format: https://storage.googleapis.com/bucket-name/path/to/file.ext
        if 'storage.googleapis.com' not in image_url:
            logger.error(f"[IMAGE PROXY] ERROR: Invalid Firebase Storage URL: {image_url}")
            return jsonify({'error': 'Invalid Firebase Storage URL.'}), 400
        logger.info(f"[IMAGE PROXY] Parsing URL: {image_url}")
        # Everything after the host...
        url_parts = image_url.split('storage.googleapis.com/')[1]
        logger.info(f"[IMAGE PROXY] URL parts after split: {url_parts}")
        # ...minus any query string.
        url_parts = url_parts.split('?')[0]
        logger.info(f"[IMAGE PROXY] URL parts after removing query params: {url_parts}")
        # First path component is the bucket name; the remainder is the blob path.
        path_components = url_parts.split('/', 1)
        logger.info(f"[IMAGE PROXY] Path components: {path_components}")
        if len(path_components) < 2:
            logger.error(f"[IMAGE PROXY] ERROR: Invalid URL format - path_components: {path_components}")
            return jsonify({'error': 'Invalid URL format.'}), 400
        url_bucket_name, blob_path = path_components
        logger.info(f"[IMAGE PROXY] Extracted bucket name: {url_bucket_name}")
        logger.info(f"[IMAGE PROXY] Extracted blob path: {blob_path}")
        # Security check: refuse to proxy content from buckets other than ours.
        expected_bucket_name = bucket.name
        logger.info(f"[IMAGE PROXY] Expected bucket name: {expected_bucket_name}")
        if url_bucket_name != expected_bucket_name:
            logger.error(f"[IMAGE PROXY] ERROR: Bucket name mismatch - URL: {url_bucket_name}, Expected: {expected_bucket_name}")
            return jsonify({'error': 'Bucket name mismatch.'}), 403
        logger.info(f"[IMAGE PROXY] Creating blob object for path: {blob_path}")
        blob = bucket.blob(blob_path)
        logger.info(f"[IMAGE PROXY] Checking if blob exists...")
        if not blob.exists():
            logger.error(f"[IMAGE PROXY] ERROR: Image not found at path: {blob_path}")
            return jsonify({'error': 'Image not found.'}), 404
        logger.info(f"[IMAGE PROXY] Downloading blob...")
        image_bytes = blob.download_as_bytes()
        content_type = blob.content_type or 'application/octet-stream'
        logger.info(f"[IMAGE PROXY] Successfully downloaded {len(image_bytes)} bytes, content-type: {content_type}")
        # Cache for an hour so repeated views don't re-download from Storage.
        response = Response(image_bytes, content_type=content_type)
        response.headers['Cache-Control'] = 'public, max-age=3600'
        return response
    except IndexError as e:
        logger.error(f"[IMAGE PROXY] URL parsing IndexError: {e}")
        logger.error(f"[IMAGE PROXY] URL was: {image_url}")
        return jsonify({'error': 'Invalid URL format.'}), 400
    except Exception as e:
        logger.error(f"[IMAGE PROXY] Unexpected error: {e}")
        logger.error(f"[IMAGE PROXY] Error type: {type(e).__name__}")
        logger.error(f"[IMAGE PROXY] URL was: {image_url}")
        # FIX: use the module-level `traceback` import instead of re-importing
        # it locally inside the handler on every error.
        logger.error(f"[IMAGE PROXY] Full traceback: {traceback.format_exc()}")
        return jsonify({'error': 'Internal server error processing the image request.'}), 500
| # ----------------------------------------------------------------------------- | |
| # 3. AUTHENTICATION & USER MANAGEMENT (Corrected Version) | |
| # ----------------------------------------------------------------------------- | |
def signup():
    """Create a new email/password account.

    Expects JSON {email, password, displayName?}. Creates the Firebase Auth
    user, then mirrors a profile record (with 15 starter credits) under
    /users/{uid} in the Realtime Database. Returns 409 for duplicate emails.
    """
    try:
        payload = request.get_json()
        email = payload.get('email')
        password = payload.get('password')
        display_name = payload.get('displayName')  # optional; may be None
        if not (email and password):
            return jsonify({'error': 'Email and password are required'}), 400
        # Auth record first — displayName is stored there too when provided.
        new_user = auth.create_user(
            email=email,
            password=password,
            display_name=display_name,
        )
        profile = {
            'email': email,
            'displayName': display_name,
            'credits': 15,
            'is_admin': False,
            'createdAt': datetime.utcnow().isoformat(),
        }
        db_ref.child(f'users/{new_user.uid}').set(profile)
        logger.info(f"New user signed up: {new_user.uid}, Name: {display_name}")
        return jsonify({'success': True, 'uid': new_user.uid, **profile}), 201
    except Exception as e:
        logger.error(f"Signup failed: {e}")
        # Duplicate accounts deserve a clearer error than the raw SDK message.
        if 'EMAIL_EXISTS' in str(e):
            return jsonify({'error': 'An account with this email already exists.'}), 409
        return jsonify({'error': str(e)}), 400
def social_signin():
    """Ensure a Realtime-Database profile exists for a social-login user.

    First social login creates the full profile (15 starter credits) from the
    Firebase Auth record. For existing users, a missing displayName is
    backfilled from Auth before returning the profile.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Invalid or expired token'}), 401
    profile_ref = db_ref.child(f'users/{uid}')
    profile = profile_ref.get()
    if not profile:
        # First social login: build the database profile from the Auth record.
        logger.info(f"New social user detected: {uid}. Creating database profile.")
        try:
            auth_user = auth.get_user(uid)
            new_profile = {
                'email': auth_user.email,
                'displayName': auth_user.display_name,
                'credits': 15,
                'is_admin': False,
                'createdAt': datetime.utcnow().isoformat(),
            }
            profile_ref.set(new_profile)
            logger.info(f"Successfully created profile for new social user: {uid}")
            return jsonify({'success': True, 'uid': uid, **new_profile}), 201
        except Exception as e:
            logger.error(f"Error creating profile for new social user {uid}: {e}")
            return jsonify({'error': f'Failed to create user profile: {str(e)}'}), 500
    # Existing user: sync displayName from Auth if our copy is missing.
    if profile.get('displayName') is None:
        try:
            auth_user = auth.get_user(uid)
            if auth_user.display_name:
                logger.info(f"Backfilling missing displayName for existing user {uid}.")
                profile_ref.update({'displayName': auth_user.display_name})
                # Re-read so the response reflects the backfilled name.
                profile = profile_ref.get()
        except Exception as e:
            # Best-effort: failure to backfill never blocks sign-in.
            logger.error(f"Could not backfill displayName for user {uid}: {e}")
    return jsonify({'uid': uid, **profile}), 200
def get_user_profile():
    """Return the authenticated caller's /users/{uid} record (incl. displayName)."""
    uid = verify_token(request.headers.get('Authorization'))
    if uid is None:
        return jsonify({'error': 'Invalid or expired token'}), 401
    profile = db_ref.child(f'users/{uid}').get()
    if not profile:
        return jsonify({'error': 'User not found'}), 404
    return jsonify({'uid': uid, **profile})
def update_user_profile():
    """Let the authenticated user change their displayName (Auth + database)."""
    uid = verify_token(request.headers.get('Authorization'))
    if uid is None:
        return jsonify({'error': 'Invalid or expired token'}), 401
    payload = request.get_json()
    name = payload.get('displayName')
    if not (isinstance(name, str) and name.strip()):
        return jsonify({'error': 'A valid displayName is required.'}), 400
    try:
        # Keep Firebase Auth and the Realtime Database in sync.
        auth.update_user(uid, display_name=name)
        db_ref.child(f'users/{uid}').update({'displayName': name})
        logger.info(f"User {uid} updated their displayName to '{name}'.")
        return jsonify({'success': True, 'message': 'Profile updated successfully.'}), 200
    except Exception as e:
        logger.error(f"Error updating profile for user {uid}: {e}")
        return jsonify({'error': f'Failed to update profile: {str(e)}'}), 500
| # ----------------------------------------------------------------------------- | |
| # 4. FEEDBACK AND CREDIT REQUESTS (USER-FACING) | |
| # ----------------------------------------------------------------------------- | |
def submit_feedback():
    """Store a feedback record under /feedback for the authenticated user."""
    uid = verify_token(request.headers.get('Authorization'))
    if uid is None:
        return jsonify({'error': 'Invalid or expired token'}), 401
    try:
        payload = request.get_json()
        if not (payload and payload.get('message')):
            return jsonify({'error': 'Message is required'}), 400
        profile = db_ref.child(f'users/{uid}').get() or {}
        entry_ref = db_ref.child('feedback').push()
        entry = {
            "feedbackId": entry_ref.key,
            "userId": uid,
            "userEmail": profile.get('email', 'unknown'),
            "type": payload.get('type', 'general'),
            "message": payload.get('message'),
            "createdAt": datetime.utcnow().isoformat(),
            "status": "open",
        }
        entry_ref.set(entry)
        return jsonify({"success": True, "feedbackId": entry_ref.key}), 201
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def request_credits():
    """Record a pending credit request under /credit_requests for this user."""
    uid = verify_token(request.headers.get('Authorization'))
    if uid is None:
        return jsonify({'error': 'Invalid or expired token'}), 401
    try:
        payload = request.get_json()
        if not payload or 'requested_credits' not in payload:
            return jsonify({'error': 'requested_credits is required'}), 400
        entry_ref = db_ref.child('credit_requests').push()
        entry_ref.set({
            'requestId': entry_ref.key,
            'userId': uid,
            'requested_credits': payload['requested_credits'],
            'status': 'pending',  # an admin later flips this to approved/declined
            'requestedAt': datetime.utcnow().isoformat()
        })
        return jsonify({'success': True, 'requestId': entry_ref.key})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
| # ----------------------------------------------------------------------------- | |
| # 5. ADMIN ENDPOINTS | |
| # ----------------------------------------------------------------------------- | |
def get_admin_profile():
    """Admin dashboard endpoint: the admin's own profile plus aggregate stats.

    Aggregates users, projects, feedback, and credit requests from the
    Realtime Database into a 'dashboardStats' payload.
    Returns 403 on PermissionError, 500 on anything else.
    """
    try:
        admin_uid = verify_admin(request.headers.get('Authorization'))
        # Fetch all necessary data from Firebase in one go
        all_users = db_ref.child('users').get() or {}
        all_projects = db_ref.child('projects').get() or {}
        all_feedback = db_ref.child('feedback').get() or {}
        all_credit_requests = db_ref.child('credit_requests').get() or {}
        # --- User Statistics Calculation ---
        total_users = len(all_users)
        admin_count = 0
        total_credits_in_system = 0
        new_users_last_7_days = 0
        seven_days_ago = datetime.utcnow() - timedelta(days=7)
        for user_data in all_users.values():
            if user_data.get('is_admin', False):
                admin_count += 1
            # Credits are summed for every user, admin or not.
            total_credits_in_system += user_data.get('credits', 0)
            # Check for new users
            try:
                created_at_str = user_data.get('createdAt')
                if created_at_str:
                    # Accommodate different possible ISO formats
                    user_created_at = datetime.fromisoformat(created_at_str.replace('Z', '+00:00'))
                    # tzinfo is stripped so the comparison is naive-vs-naive.
                    if user_created_at.replace(tzinfo=None) > seven_days_ago:
                        new_users_last_7_days += 1
            except (ValueError, TypeError):
                # Ignore if date format is invalid or missing
                pass
        # --- Project Statistics Calculation ---
        total_projects = len(all_projects)
        projects_by_status = {
            "awaiting_approval": 0,
            "awaiting_selection": 0,
            "ready": 0,
            "unknown": 0
        }
        projects_by_category = {}
        for project_data in all_projects.values():
            # Tally by status
            status = project_data.get('status', 'unknown')
            projects_by_status[status] = projects_by_status.get(status, 0) + 1
            # Tally by category
            category = project_data.get('category', 'N/A')
            projects_by_category[category] = projects_by_category.get(category, 0) + 1
        # --- System Health Calculation ---
        open_feedback_count = sum(1 for fb in all_feedback.values() if fb.get('status') == 'open')
        pending_requests_count = sum(1 for req in all_credit_requests.values() if req.get('status') == 'pending')
        # Assemble the final response object
        admin_personal_data = all_users.get(admin_uid, {})
        response_data = {
            'uid': admin_uid,
            'email': admin_personal_data.get('email'),
            'credits': admin_personal_data.get('credits'),
            'is_admin': True,
            'dashboardStats': {
                'users': {
                    'total': total_users,
                    'admins': admin_count,
                    'regular': total_users - admin_count,
                    'newLast7Days': new_users_last_7_days,
                    'totalCreditsInSystem': total_credits_in_system
                },
                'projects': {
                    'total': total_projects,
                    'byStatus': projects_by_status,
                    'byCategory': projects_by_category
                },
                'system': {
                    'openFeedback': open_feedback_count,
                    'pendingCreditRequests': pending_requests_count
                }
            }
        }
        return jsonify(response_data), 200
    except PermissionError as e:
        return jsonify({'error': str(e)}), 403  # Use 403 Forbidden for permission issues
    except Exception as e:
        print(traceback.format_exc())
        return jsonify({'error': f"An internal error occurred: {e}"}), 500
def list_credit_requests():
    """Admin: return every record under /credit_requests as a JSON array."""
    try:
        verify_admin(request.headers.get('Authorization'))
        requests_data = db_ref.child('credit_requests').get() or {}
        return jsonify(list(requests_data.values()))
    except PermissionError as e:
        # FIX: report authorization failures as 403 (consistent with
        # get_admin_profile) instead of collapsing them into a generic 500.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def process_credit_request(request_id):
    """Admin: approve or decline a pending credit request.

    Body: {"decision": "approved" | "declined"}. Approval adds the requested
    credits to the requesting user's balance; either way the request record is
    stamped with the decision, the processing admin, and a timestamp.
    """
    try:
        admin_uid = verify_admin(request.headers.get('Authorization'))
        req_ref = db_ref.child(f'credit_requests/{request_id}')
        req_data = req_ref.get()
        if not req_data:
            return jsonify({'error': 'Credit request not found'}), 404
        decision = request.json.get('decision')
        if decision not in ['approved', 'declined']:
            return jsonify({'error': 'Decision must be "approved" or "declined"'}), 400
        if decision == 'approved':
            user_ref = db_ref.child(f'users/{req_data["userId"]}')
            user_data = user_ref.get()
            # Silently skip the credit grant if the user record vanished;
            # the request is still marked processed below.
            if user_data:
                new_total = user_data.get('credits', 0) + int(req_data.get('requested_credits', 0))
                user_ref.update({'credits': new_total})
        req_ref.update({'status': decision, 'processedBy': admin_uid, 'processedAt': datetime.utcnow().isoformat()})
        return jsonify({'success': True, 'message': f'Request {decision}.'})
    except PermissionError as e:
        # FIX: 403 for authorization failures, consistent with get_admin_profile.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def admin_view_feedback():
    """Admin: return every record under /feedback as a JSON array."""
    try:
        verify_admin(request.headers.get('Authorization'))
        feedback_data = db_ref.child('feedback').get() or {}
        return jsonify(list(feedback_data.values()))
    except PermissionError as e:
        # FIX: 403 for authorization failures, consistent with get_admin_profile.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def admin_list_users():
    """Admin: return every user profile, each tagged with its uid."""
    try:
        verify_admin(request.headers.get('Authorization'))
        all_users = db_ref.child('users').get() or {}
        user_list = [{'uid': uid, **data} for uid, data in all_users.items()]
        return jsonify(user_list)
    except PermissionError as e:
        # FIX: 403 for authorization failures, consistent with get_admin_profile.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def admin_update_credits(uid):
    """Admin: adjust a user's credit balance.

    Body: {"add_credits": <number>} — may be negative to deduct. Returns the
    user's new total on success.
    """
    try:
        verify_admin(request.headers.get('Authorization'))
        add_credits = request.json.get('add_credits')
        if add_credits is None:
            return jsonify({'error': 'add_credits is required'}), 400
        user_ref = db_ref.child(f'users/{uid}')
        user_data = user_ref.get()
        if not user_data:
            return jsonify({'error': 'User not found'}), 404
        new_total = user_data.get('credits', 0) + float(add_credits)
        user_ref.update({'credits': new_total})
        return jsonify({'success': True, 'new_total_credits': new_total})
    except PermissionError as e:
        # FIX: 403 for authorization failures, consistent with get_admin_profile.
        return jsonify({'error': str(e)}), 403
    except Exception as e:
        # Also covers a non-numeric add_credits value (float() ValueError).
        return jsonify({'error': str(e)}), 500
| # ----------------------------------------------------------------------------- | |
| # 6. DIY PROJECT ENDPOINTS (Core Logic) | |
| # ----------------------------------------------------------------------------- | |
# Core project-creation and analysis endpoints.
def create_project():
    """Create a new DIY project from an uploaded image and optional context.

    Costs 1 credit. Flow: validate the upload, ask the AI for a category,
    then for a title/description/initial plan (or, for upcycling with no
    specific request, three project options), upload the image to Storage,
    persist the project record, and finally deduct the credit.

    Returns the stored project JSON with HTTP 201 on success.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    user_ref = db_ref.child(f'users/{uid}')
    user_data = user_ref.get()
    if not user_data or user_data.get('credits', 0) < 1:
        return jsonify({'error': 'Insufficient credits'}), 402
    if 'image' not in request.files:
        return jsonify({'error': 'Image file is required'}), 400
    image_file = request.files['image']
    context_text = request.form.get('contextText', '')
    image_bytes = image_file.read()
    try:
        # Validate the upload up front so a corrupt file is reported as a
        # client error (400) instead of an unhandled server error.
        pil_image = Image.open(io.BytesIO(image_bytes))
    except Exception:
        return jsonify({'error': 'Uploaded file is not a valid image'}), 400
    try:
        category_prompt = (
            "You are an expert DIY assistant. Analyze the user's image and context. "
            f"Context: '{context_text}'. "
            "Categorize the project into ONE of the following: "
            "Home Appliance Repair, Automotive Maintenance, Gardening & Urban Farming, "
            "Upcycling & Sustainable Crafts, or DIY Project Creation. "
            "Reply with ONLY the category name."
        )
        category = send_text_request(CATEGORY_MODEL, category_prompt, pil_image)
        if not category:
            return jsonify({'error': 'Failed to get project category from AI.'}), 500
        plan_prompt = f"""
You are an expert DIY assistant in the category: {category}.
User Context: "{context_text if context_text else 'No context provided.'}"
Based on the image and context, perform the following:
1. **Title:** Create a short, clear title for this project.
2. **Description:** Write a brief, one-paragraph description of the goal.
3. **Initial Plan:**
   - If 'Upcycling & Sustainable Crafts' AND no specific project is mentioned, propose three distinct project options as a numbered list under "UPCYCLING OPTIONS:".
   - For all other cases, briefly outline the main stages of the proposed solution.
Structure your response EXACTLY like this:
TITLE: [Your title]
DESCRIPTION: [Your description]
INITIAL PLAN:
[Your plan or 3 options]
"""
        plan_response = send_text_request(GENERATION_MODEL, plan_prompt, pil_image)
        if not plan_response:
            return jsonify({'error': 'Failed to generate project plan from AI.'}), 500
        # Guard each section: a missing header previously raised AttributeError
        # on .group() and surfaced as an opaque 500.
        title_match = re.search(r"TITLE:\s*(.*)", plan_response)
        # Non-greedy up to "INITIAL PLAN:" — the old greedy (.*) with DOTALL
        # swallowed the whole plan into the description.
        description_match = re.search(r"DESCRIPTION:\s*(.*?)\s*INITIAL PLAN:", plan_response, re.DOTALL)
        plan_match = re.search(r"INITIAL PLAN:\s*(.*)", plan_response, re.DOTALL)
        if not (title_match and description_match and plan_match):
            return jsonify({'error': 'AI response format error: could not parse the plan.'}), 500
        title = title_match.group(1).strip()
        description = description_match.group(1).strip()
        initial_plan_text = plan_match.group(1).strip()
        upcycling_options = re.findall(r"^\s*\d+\.\s*(.*)", initial_plan_text, re.MULTILINE) if "UPCYCLING OPTIONS:" in initial_plan_text else []
        initial_plan = initial_plan_text if not upcycling_options else ""
        # With options present the user must pick one before the full guide
        # is generated; otherwise the plan only needs approval.
        status = "awaiting_selection" if upcycling_options else "awaiting_approval"
        project_id = str(uuid.uuid4())
        image_path = f"users/{uid}/projects/{project_id}/initial_image.png"
        image_url = upload_to_storage(image_bytes, image_path, content_type=image_file.content_type)
        project_data = {
            "uid": uid, "projectId": project_id, "status": status, "createdAt": datetime.utcnow().isoformat(),
            "userImageURL": image_url, "contextText": context_text, "projectTitle": title,
            "projectDescription": description, "category": category, "initialPlan": initial_plan,
            "upcyclingOptions": upcycling_options, "toolsList": [], "steps": []
        }
        db_ref.child(f'projects/{project_id}').set(project_data)
        # Deduct the credit only after everything above succeeded.
        user_ref.update({'credits': user_data.get('credits', 0) - 1})
        return jsonify(project_data), 201
    except Exception as e:
        print(traceback.format_exc())
        return jsonify({'error': f"An error occurred: {e}"}), 500
def approve_project_plan(project_id):
    """Turn an approved plan (or a selected upcycling option) into a full guide.

    Costs 5 credits. Re-downloads the project's original image, asks the
    generation model for a tools list plus up to five steps with one inline
    illustration per step, uploads each step's image and TTS narration to
    Storage, then marks the project "ready" and deducts the credits.

    Every phase is timed and logged under the [PROJECT APPROVAL] tag.
    """
    start_time = time.time()
    logger.info(f"[PROJECT APPROVAL] Starting approval process for project: {project_id}")
    # Authorization timing
    auth_start = time.time()
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        logger.error(f"[PROJECT APPROVAL] ERROR: Unauthorized access attempt for project: {project_id}")
        return jsonify({'error': 'Unauthorized'}), 401
    auth_time = time.time() - auth_start
    logger.info(f"[PROJECT APPROVAL] Authorization completed in {auth_time:.3f}s for user: {uid}")
    # User data fetch timing
    user_fetch_start = time.time()
    user_ref = db_ref.child(f'users/{uid}')
    user_data = user_ref.get()
    if not user_data or user_data.get('credits', 0) < 5:
        logger.error(f"[PROJECT APPROVAL] ERROR: Insufficient credits for user: {uid}, credits: {user_data.get('credits', 0) if user_data else 0}")
        return jsonify({'error': 'Insufficient credits'}), 402
    user_fetch_time = time.time() - user_fetch_start
    logger.info(f"[PROJECT APPROVAL] User data fetch completed in {user_fetch_time:.3f}s, credits: {user_data.get('credits', 0)}")
    # Project data fetch timing
    project_fetch_start = time.time()
    project_ref = db_ref.child(f'projects/{project_id}')
    project_data = project_ref.get()
    if not project_data or project_data.get('uid') != uid:
        logger.error(f"[PROJECT APPROVAL] ERROR: Project not found or access denied - project_id: {project_id}, uid: {uid}")
        return jsonify({'error': 'Project not found or access denied'}), 404
    project_fetch_time = time.time() - project_fetch_start
    logger.info(f"[PROJECT APPROVAL] Project data fetch completed in {project_fetch_time:.3f}s for project: {project_data.get('projectTitle', 'Unknown')}")
    # Image download and processing timing
    # selectedOption is set only for upcycling projects where the user picked
    # one of the proposed options.
    selected_option = request.json.get('selectedOption')
    logger.info(f"[PROJECT APPROVAL] Selected option: {selected_option}")
    image_download_start = time.time()
    try:
        response = requests.get(project_data['userImageURL'], timeout=30)
        response.raise_for_status()
    except requests.RequestException as e:
        logger.error(f"[PROJECT APPROVAL] ERROR: Image download failed: {e}")
        return jsonify({'error': 'Failed to download project image'}), 500
    image_download_time = time.time() - image_download_start
    logger.info(f"[PROJECT APPROVAL] Image download completed in {image_download_time:.3f}s, size: {len(response.content)} bytes")
    image_processing_start = time.time()
    try:
        pil_image = Image.open(io.BytesIO(response.content)).convert('RGB')
    except Exception as e:
        logger.error(f"[PROJECT APPROVAL] ERROR: Image processing failed: {e}")
        return jsonify({'error': 'Failed to process project image'}), 500
    image_processing_time = time.time() - image_processing_start
    logger.info(f"[PROJECT APPROVAL] Image processing completed in {image_processing_time:.3f}s")
    # Context preparation timing
    context_start = time.time()
    context = (
        f"The user chose the upcycling project: '{selected_option}'."
        if selected_option
        else f"The user has approved the plan for '{project_data['projectTitle']}'."
    )
    detailed_prompt = f"""
You are a DIY expert. The user wants to proceed with the project titled "{project_data['projectTitle']}".
{context}
Provide a detailed guide. For each step, you MUST provide a simple, clear illustrative image.
Format your response EXACTLY like this:
TOOLS AND MATERIALS:
- Tool A
- Material B
STEPS(Maximum 5 steps):
1. First step instructions.
2. Second step instructions...
"""
    context_time = time.time() - context_start
    logger.info(f"[PROJECT APPROVAL] Context preparation completed in {context_time:.3f}s")
    try:
        # AI generation timing
        ai_start = time.time()
        logger.info(f"[PROJECT APPROVAL] Starting AI generation with model: {GENERATION_MODEL}")
        # Request both text and inline images in one multimodal response.
        chat = client.chats.create(
            model=GENERATION_MODEL,
            config=types.GenerateContentConfig(response_modalities=["Text", "Image"])
        )
        full_resp = chat.send_message([detailed_prompt, pil_image])
        ai_time = time.time() - ai_start
        logger.info(f"[PROJECT APPROVAL] AI generation completed in {ai_time:.3f}s")
        # Response parsing timing: split the mixed response into one combined
        # text blob plus an ordered list of inline step illustrations.
        parsing_start = time.time()
        gen_parts = full_resp.candidates[0].content.parts
        combined_text = ""
        inline_images = []
        for part in gen_parts:
            if part.text is not None:
                combined_text += part.text + "\n"
            if part.inline_data is not None:
                img = Image.open(io.BytesIO(part.inline_data.data)).convert('RGB')
                inline_images.append(img)
        combined_text = combined_text.strip()
        parsing_time = time.time() - parsing_start
        logger.info(f"[PROJECT APPROVAL] Response parsing completed in {parsing_time:.3f}s, found {len(inline_images)} images")
        # Text extraction timing with robust error handling
        extraction_start = time.time()
        # Add debug logging to see what the AI actually returned
        logger.info(f"[PROJECT APPROVAL] AI Response structure check:")
        logger.info(f"[PROJECT APPROVAL] Full response length: {len(combined_text)}")
        logger.info(f"[PROJECT APPROVAL] Contains 'TOOLS AND MATERIALS': {'TOOLS AND MATERIALS' in combined_text.upper()}")
        logger.info(f"[PROJECT APPROVAL] Contains 'STEPS': {'STEPS' in combined_text.upper()}")
        logger.info(f"[PROJECT APPROVAL] Response preview: {combined_text[:300]}...")
        # More robust regex patterns with error handling - updated for your production format
        # Accepts both "STEPS:" and "STEPS(Maximum 5 steps):" style headers.
        tools_match = re.search(r"TOOLS AND MATERIALS:\s*(.*?)\s*(?=STEPS\s*\(|STEPS\s*:|$)", combined_text, re.DOTALL | re.IGNORECASE)
        steps_match = re.search(r"STEPS\s*\([^)]*\):\s*(.*)|STEPS\s*:\s*(.*)", combined_text, re.DOTALL | re.IGNORECASE)
        if not tools_match:
            logger.error(f"[PROJECT APPROVAL] ERROR: Could not find TOOLS AND MATERIALS section in AI response")
            logger.error(f"[PROJECT APPROVAL] AI Response full text: {combined_text}")
            return jsonify({'error': 'AI response format error: Could not parse tools section'}), 500
        if not steps_match:
            logger.error(f"[PROJECT APPROVAL] ERROR: Could not find STEPS section in AI response")
            logger.error(f"[PROJECT APPROVAL] AI Response full text: {combined_text}")
            return jsonify({'error': 'AI response format error: Could not parse steps section'}), 500
        tools_section = tools_match.group(1).strip()
        # Only one of the two alternations in steps_match captures, depending
        # on which header form the model used.
        steps_section = (steps_match.group(1) or steps_match.group(2)).strip() if steps_match else ""
        # Additional validation
        if not tools_section:
            logger.error(f"[PROJECT APPROVAL] ERROR: Empty tools section found")
            return jsonify({'error': 'AI response format error: Empty tools section'}), 500
        if not steps_section:
            logger.error(f"[PROJECT APPROVAL] ERROR: Empty steps section found")
            return jsonify({'error': 'AI response format error: Empty steps section'}), 500
        tools_list = [line.strip("- ").strip() for line in tools_section.split('\n') if line.strip() and not line.strip().startswith('-')]
        # Also include lines that start with dashes
        dash_tools = [line.strip("- ").strip() for line in tools_section.split('\n') if line.strip().startswith('-')]
        tools_list.extend(dash_tools)
        # Remove duplicates while preserving order
        seen = set()
        tools_list = [x for x in tools_list if not (x in seen or seen.add(x))]
        parsed_steps = parse_numbered_steps(steps_section)
        # Validate parsed results
        if not tools_list:
            logger.error(f"[PROJECT APPROVAL] ERROR: No tools parsed from response")
            logger.error(f"[PROJECT APPROVAL] Tools section was: {tools_section}")
            return jsonify({'error': 'AI response format error: No tools found'}), 500
        if not parsed_steps:
            logger.error(f"[PROJECT APPROVAL] ERROR: No steps parsed from response")
            logger.error(f"[PROJECT APPROVAL] Steps section was: {steps_section}")
            return jsonify({'error': 'AI response format error: No steps found'}), 500
        extraction_time = time.time() - extraction_start
        logger.info(f"[PROJECT APPROVAL] Text extraction completed in {extraction_time:.3f}s, tools: {len(tools_list)}, steps: {len(parsed_steps)}")
        # The model should emit exactly one illustration per step; if counts
        # disagree, truncate both to the shorter length rather than failing.
        if len(parsed_steps) != len(inline_images):
            logger.error(f"[PROJECT APPROVAL] ERROR: AI response mismatch - Steps: {len(parsed_steps)}, Images: {len(inline_images)}")
            # Try to handle the mismatch gracefully
            min_length = min(len(parsed_steps), len(inline_images))
            if min_length > 0:
                logger.info(f"[PROJECT APPROVAL] Attempting to proceed with {min_length} steps/images")
                parsed_steps = parsed_steps[:min_length]
                inline_images = inline_images[:min_length]
            else:
                return jsonify({'error': 'AI response mismatch: No valid steps and images found.'}), 500
        # Step processing timing
        step_processing_start = time.time()
        final_steps = []
        total_upload_time = 0
        total_tts_time = 0
        for i, step_info in enumerate(parsed_steps):
            logger.info(f"[PROJECT APPROVAL] Processing step {i+1}/{len(parsed_steps)}")
            try:
                # Image upload timing
                img_byte_arr = io.BytesIO()
                image_upload_start = time.time()
                inline_images[i].save(img_byte_arr, format='JPEG', optimize=True, quality=70)
                img_path = f"users/{uid}/projects/{project_id}/steps/step_{i+1}_image.jpg"
                img_url = upload_to_storage(img_byte_arr.getvalue(), img_path, 'image/jpeg')
                image_upload_time = time.time() - image_upload_start
                total_upload_time += image_upload_time
                logger.info(f"[PROJECT APPROVAL] Step {i+1} image upload completed in {image_upload_time:.3f}s")
                # TTS generation timing
                tts_start = time.time()
                narration_url = generate_tts_audio_and_upload(step_info['text'], uid, project_id, i + 1)
                tts_time = time.time() - tts_start
                total_tts_time += tts_time
                logger.info(f"[PROJECT APPROVAL] Step {i+1} TTS generation completed in {tts_time:.3f}s")
                step_info.update({
                    "imageUrl": img_url,
                    "narrationUrl": narration_url,
                    "isDone": False,
                    "notes": ""
                })
                final_steps.append(step_info)
            except Exception as e:
                logger.error(f"[PROJECT APPROVAL] ERROR processing step {i+1}: {e}")
                # Continue with other steps rather than failing entirely
                step_info.update({
                    "imageUrl": "",
                    "narrationUrl": "",
                    "isDone": False,
                    "notes": ""
                })
                final_steps.append(step_info)
        step_processing_time = time.time() - step_processing_start
        logger.info(f"[PROJECT APPROVAL] All steps processing completed in {step_processing_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] Total upload time: {total_upload_time:.3f}s, Total TTS time: {total_tts_time:.3f}s")
        # Database update timing
        db_update_start = time.time()
        update_data = {
            "status": "ready",
            "toolsList": tools_list,
            "steps": final_steps,
            "selectedOption": selected_option or ""
        }
        project_ref.update(update_data)
        logger.info(f"[PROJECT APPROVAL] Updating data in db: {len(update_data)} fields")
        db_update_time = time.time() - db_update_start
        logger.info(f"[PROJECT APPROVAL] Database update completed in {db_update_time:.3f}s")
        # Final project fetch timing
        final_fetch_start = time.time()
        updated_project = project_ref.get()
        updated_project["projectId"] = project_id
        final_fetch_time = time.time() - final_fetch_start
        logger.info(f"[PROJECT APPROVAL] Final project fetch completed in {final_fetch_time:.3f}s")
        # Credits deduction timing
        credits_update_start = time.time()
        user_ref.update({'credits': user_data.get('credits', 0) - 5})
        credits_update_time = time.time() - credits_update_start
        logger.info(f"[PROJECT APPROVAL] Credits update completed in {credits_update_time:.3f}s")
        # Total time calculation
        total_time = time.time() - start_time
        logger.info(f"[PROJECT APPROVAL] SUCCESS: Project approval completed in {total_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] TIMING BREAKDOWN:")
        logger.info(f"[PROJECT APPROVAL] - Authorization: {auth_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - User fetch: {user_fetch_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Project fetch: {project_fetch_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Image download: {image_download_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Image processing: {image_processing_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Context prep: {context_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - AI generation: {ai_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Response parsing: {parsing_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Text extraction: {extraction_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Step processing: {step_processing_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Total uploads: {total_upload_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Total TTS: {total_tts_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - DB update: {db_update_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Final fetch: {final_fetch_time:.3f}s")
        logger.info(f"[PROJECT APPROVAL] - Credits update: {credits_update_time:.3f}s")
        return jsonify(updated_project)
    except Exception as e:
        total_time = time.time() - start_time
        logger.error(f"[PROJECT APPROVAL] ERROR: Exception occurred after {total_time:.3f}s: {e}")
        logger.error(f"[PROJECT APPROVAL] Error type: {type(e).__name__}")
        logger.error(f"[PROJECT APPROVAL] Project ID: {project_id}, User ID: {uid}")
        import traceback
        logger.error(f"[PROJECT APPROVAL] Full traceback: {traceback.format_exc()}")
        return jsonify({'error': f'Internal server error: {str(e)}'}), 500
def list_projects():
    """Return every project owned by the authenticated user."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    owned = db_ref.child('projects').order_by_child('uid').equal_to(uid).get() or {}
    return jsonify([project for project in owned.values()])
def get_project(project_id):
    """Fetch a single project by id, enforcing that the caller owns it."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    project = db_ref.child(f'projects/{project_id}').get()
    # A missing project and a foreign project look identical to the caller.
    if not project or project.get('uid') != uid:
        return jsonify({'error': 'Project not found or access denied'}), 404
    return jsonify(project)
def update_step(project_id, step_number):
    """Update the 'isDone' flag and/or 'notes' of one step in a project.

    Accepts a JSON body with optional 'isDone' and 'notes' keys; returns the
    step as stored after the update. 404 if the project or step is missing.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    # silent=True makes a missing/non-JSON body return None so the check
    # below yields a clean 400; the bare get_json() would raise instead,
    # making the original `data is None` branch unreachable.
    data = request.get_json(silent=True)
    if data is None:
        return jsonify({'error': 'JSON body is required'}), 400
    project_data = db_ref.child(f'projects/{project_id}').get()
    if not project_data or project_data.get('uid') != uid:
        return jsonify({'error': 'Project not found or access denied'}), 404
    steps = project_data.get('steps', [])
    # Steps are stored as a list; locate the index whose stepNumber matches.
    step_index = next((i for i, s in enumerate(steps) if s.get('stepNumber') == step_number), -1)
    if step_index == -1:
        return jsonify({'error': f'Step number {step_number} not found'}), 404
    step_path = f'projects/{project_id}/steps/{step_index}'
    if 'isDone' in data:
        db_ref.child(f'{step_path}/isDone').set(bool(data['isDone']))
    if 'notes' in data:
        db_ref.child(f'{step_path}/notes').set(str(data['notes']))
    return jsonify({"success": True, "updatedStep": db_ref.child(step_path).get()})
def delete_project(project_id):
    """Delete a project record and every file it stored in Cloud Storage."""
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    project_ref = db_ref.child(f'projects/{project_id}')
    existing = project_ref.get()
    if not existing or existing.get('uid') != uid:
        return jsonify({'error': 'Project not found or access denied'}), 404
    # Remove the DB record first, then sweep the storage folder.
    project_ref.delete()
    storage_prefix = f"users/{uid}/projects/{project_id}/"
    for blob in bucket.list_blobs(prefix=storage_prefix):
        blob.delete()
    return jsonify({"success": True, "message": f"Project {project_id} deleted."})
| #------------------------ | |
| # AI phone call ElevenLabs | |
| #------------------------- | |
| import math | |
# These endpoints use the module-level 'db_ref', 'client', 'logger', and
# 'verify_token' defined earlier in this file.
def summarize_user_history(uid):
    """
    Fetches all of a user's past transcripts, sends them to Gemini for analysis,
    and returns a concise, actionable summary for the AI agent.

    Returns "This is a new user." when no history exists, and
    "Could not retrieve user history." on any failure (never raises).
    """
    try:
        logger.info(f"[BRIEFING] Fetching transcript history for user: {uid}")
        transcripts_ref = db_ref.child(f'transcripts/{uid}')
        all_transcripts = transcripts_ref.get()
        if not all_transcripts:
            logger.info(f"[BRIEFING] No history found for user {uid}. Returning 'new user' summary.")
            return "This is a new user."
        history_list = []
        for transcript_data in all_transcripts.values():
            history_list.append({
                "date": transcript_data.get("createdAt"),
                "project_title": transcript_data.get("projectId"),
                "transcript": transcript_data.get("transcript")
            })
        history_json = json.dumps(history_list, indent=2)
        # Fixed typo in the final bullet ("comorehensive" -> "comprehensive")
        # so the model is not shown a misspelled instruction.
        analyst_prompt = """
You are a world-class executive assistant and data analyst. Your job is to analyze a user's conversation history with a DIY expert AI named Alfred. Your goal is to produce a 'Pre-Call Briefing' for Alfred that is concise, insightful, and focuses on personal details.
The history will be a JSON string containing a list of past conversations. Each conversation has a date, project title, and a full transcript.
Your task is to identify patterns and extract the following key details. If a detail is not mentioned, omit that line from the output.
- Personal Context: Identify who the projects are for (e.g., 'wife', 'son') and any mentioned personal details (e.g., 'has a dog', 'works as a teacher').
- Preferences: Note any mentioned tastes or preferences (e.g., 'likes rustic style', 'favorite color is green').
- Skill Assessment: Assess their DIY skill level (e.g., 'Beginner, hesitant with power tools', 'Intermediate, comfortable with plumbing').
- Tool Inventory: List any specific tools they have mentioned owning or needing (e.g., 'Owns a power drill', 'Needed to buy a special wrench').
- Recurring Themes: Identify any patterns in their questions or struggles (e.g., 'Often asks for clarification on measurements', 'Struggles with painting techniques').
Your output MUST be a series of bullet points. Do not add any conversational text or greetings. Start the entire response with 'Here is your briefing on this user:'.
Example Output:
Here is your briefing on this user:
* Is building this project for: their daughter
* Personal context: has a golden retriever
* Assessed skill level: Beginner, but learns quickly
* Known tools: owns a basic screwdriver set
* Recurring themes: frequently asks for pet-safe material recommendations
* comprehensive summary of the transcript(s): More details about the user in a short paragraph
"""
        logger.info(f"[BRIEFING] Sending {len(history_list)} transcripts to Gemini for summarization.")
        response = client.models.generate_content(
            model='gemini-2.0-flash-lite',
            contents=[analyst_prompt, history_json]
        )
        summary = response.text.strip()
        logger.info(f"[BRIEFING] Received summary from Gemini: {summary}")
        return summary
    except Exception as e:
        logger.error(f"[BRIEFING] Failed to generate user summary for {uid}: {e}")
        return "Could not retrieve user history."
def get_call_briefing():
    """
    Endpoint the frontend hits once before starting a conversation.
    Fetches the user's transcript history and condenses it into a
    'memory_summary' briefing for the voice agent.
    """
    logger.info("[BRIEFING] Received request for a new call briefing.")
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    try:
        summary = summarize_user_history(uid)
        return jsonify({"memory_summary": summary}), 200
    except Exception as e:
        logger.error(f"An unexpected error occurred in get_call_briefing for user {uid}: {e}")
        return jsonify({'error': 'Failed to generate call briefing.'}), 500
def test_agent():
    """
    Debug endpoint: verifies the ElevenLabs conversational agent exists and
    that a signed conversation URL can be obtained for it.

    Returns a per-test result dict; 500 if the API key is missing or a
    request fails entirely.
    """
    if not ELEVENLABS_API_KEY:
        return jsonify({'error': 'API key not set on server'}), 500
    headers = {"xi-api-key": ELEVENLABS_API_KEY}
    results = {'agent_id': AGENT_ID, 'tests': {}}
    try:
        # Test 1: Check if the agent can be found by its ID.
        agent_url = f"https://api.elevenlabs.io/v1/convai/agents/{AGENT_ID}"
        agent_resp = requests.get(agent_url, headers=headers, timeout=10)
        results['tests']['agent_check'] = {
            'status': agent_resp.status_code,
            'exists': agent_resp.ok
        }
        # Test 2: Check if we can get a signed URL for this agent. This is the most important test.
        conv_url = f"https://api.elevenlabs.io/v1/convai/conversation/get-signed-url?agent_id={AGENT_ID}"
        conv_resp = requests.get(conv_url, headers=headers, timeout=10)
        url_received = False
        if conv_resp.ok:
            try:
                # Guard against an OK response with a non-JSON body, which
                # previously raised and skipped the results dict entirely.
                url_received = 'signed_url' in conv_resp.json()
            except ValueError:
                url_received = False
        results['tests']['get_signed_url_check'] = {
            'status': conv_resp.status_code,
            'url_received': url_received
        }
        return jsonify(results)
    except Exception as e:
        # Bug fix: the error path used to return HTTP 200; report 500 so the
        # caller can distinguish a failed diagnostic from a successful one.
        return jsonify({'error': str(e), 'agent_id': AGENT_ID}), 500
def log_call_usage(project_id):
    """
    Log an AI phone-call's usage: store the conversation transcript (if a
    valid one was sent) and deduct credits from the user.

    Body: { "durationSeconds": <number>, "transcript": <str, optional> }.
    Billing: 3 credits per started minute; balance never goes below zero.
    """
    logger.info(f"[LOGGING] Received usage log for project: {project_id}")
    uid = verify_token(request.headers.get('Authorization'))
    if not uid:
        return jsonify({'error': 'Unauthorized'}), 401
    # silent=True + fallback: a missing/non-JSON body no longer crashes on
    # data.get with an AttributeError.
    data = request.get_json(silent=True) or {}
    transcript = data.get("transcript")
    try:
        # Coerce to float; None and non-numeric values both land here and
        # return the same 400 the original intended.
        duration_seconds = float(data.get("durationSeconds"))
    except (TypeError, ValueError):
        return jsonify({'error': 'Invalid duration provided.'}), 400
    # --- Credit Calculation: bill per started minute ---
    minutes = math.ceil(duration_seconds / 60)
    cost = minutes * 3
    logger.info(f"[LOGGING] User '{uid}' call duration: {duration_seconds:.2f}s, Cost: {cost} credits.")
    try:
        # --- Transcript Storage (only for non-trivial transcripts) ---
        if transcript and isinstance(transcript, str) and len(transcript) > 10:
            transcript_id = f"{project_id}_{int(time.time())}"
            transcript_ref = db_ref.child(f'transcripts/{uid}/{transcript_id}')
            transcript_data = {
                "transcript": transcript,
                "projectId": project_id,
                "userId": uid,
                "durationSeconds": duration_seconds,
                "createdAt": datetime.utcnow().isoformat()
            }
            transcript_ref.set(transcript_data)
            logger.info(f"[LOGGING] Successfully stored transcript {transcript_id} for user '{uid}'.")
        else:
            logger.warning(f"[LOGGING] No valid transcript provided for user '{uid}' on project {project_id}.")
        # --- Credit Deduction (floored at zero) ---
        user_ref = db_ref.child(f'users/{uid}')
        user_data = user_ref.get()
        if user_data is None:
            return jsonify({'error': 'User not found.'}), 404
        current_credits = user_data.get('credits', 0)
        new_credits = max(0, current_credits - cost)
        user_ref.update({'credits': new_credits})
        logger.info(f"[LOGGING] Updated credits for user '{uid}'. New balance: {new_credits}")
        return jsonify({
            "status": "success",
            "creditsDeducted": cost,
            "remainingCredits": new_credits
        }), 200
    except Exception as e:
        logger.error(f"[LOGGING] A database error occurred for user '{uid}': {e}")
        return jsonify({'error': 'A server error occurred while updating credits.'}), 500
| #Stripe Payments | |
def get_stripe_config():
    """
    Expose only the public Stripe settings the frontend needs.
    Secret keys never leave the server.
    """
    try:
        config = {
            "publishableKey": os.environ.get("STRIPE_PUBLISHABLE_KEY"),
            "priceIds": {
                "fixer": STRIPE_PRICE_FIXER,
                "pro": STRIPE_PRICE_PRO,
            },
        }
        return jsonify(config), 200
    except Exception as e:
        logger.error(f"[STRIPE] Failed to serve config: {e}")
        return jsonify({"error": "Server configuration error"}), 500
def create_checkout_session():
    """
    Create a Stripe Checkout Session for a recurring subscription.

    Client body:
        { "plan": "standard" } -> The Fixer (200 credits/month)
        { "plan": "premium" }  -> The Pro (500 credits/month)

    Responds with the session id and redirect URL.
    """
    uid = verify_token(request.headers.get("Authorization"))
    if not uid:
        return jsonify({"error": "Unauthorized"}), 401
    if not (STRIPE_SECRET_KEY and STRIPE_PRICE_FIXER and STRIPE_PRICE_PRO):
        logger.error("[STRIPE] Missing Stripe configuration.")
        return jsonify({"error": "Stripe is not configured on the server."}), 500
    body = request.get_json() or {}
    plan_key = (body.get("plan") or "").lower().strip()
    plan_cfg = PLAN_CONFIG.get(plan_key)
    if not plan_cfg:
        return jsonify({"error": "Invalid plan selected."}), 400
    price_id = plan_cfg.get("price_id")
    # Sanity-check the configured price id before hitting the Stripe API.
    if not (price_id and price_id.startswith("price_")):
        logger.error(f"[STRIPE] Misconfigured price_id for plan {plan_key}: {price_id}")
        return jsonify({"error": "Billing configuration error – contact support."}), 500
    try:
        customer_id = get_or_create_stripe_customer(uid)
        # The same metadata goes on both the session and the subscription so
        # webhooks can always recover the Firebase uid and plan.
        tracking_metadata = {"firebase_uid": uid, "plan": plan_key}
        session = stripe.checkout.Session.create(
            mode="subscription",
            customer=customer_id,
            payment_method_types=["card"],
            line_items=[{"price": price_id, "quantity": 1}],
            success_url=STRIPE_SUCCESS_URL + "?session_id={CHECKOUT_SESSION_ID}",
            cancel_url=STRIPE_CANCEL_URL,
            metadata=tracking_metadata,
            subscription_data={"metadata": dict(tracking_metadata)},
        )
        logger.info(
            f"[STRIPE] Created checkout session {session.id} for user {uid}, plan {plan_key}"
        )
        return jsonify({"id": session.id, "url": session.url}), 200
    except Exception as e:
        logger.error(f"[STRIPE] Error creating checkout session: {e}")
        return jsonify({"error": "Failed to create checkout session."}), 500
def _handle_checkout_completed(session):
    """First payment via Checkout: mirror the new subscription into Firebase
    and grant the first month's credits.

    ``session`` is the ``checkout.session`` object from the webhook payload.
    ``firebase_uid`` / ``plan`` come from the metadata we attached when the
    Checkout session was created.
    """
    metadata = session.get("metadata") or {}
    uid = metadata.get("firebase_uid")
    plan_key = metadata.get("plan")
    subscription_id = session.get("subscription")
    logger.info(
        f"[STRIPE] checkout.session.completed uid={uid}, plan={plan_key}, sub={subscription_id}"
    )
    if subscription_id:
        # Pull the authoritative subscription state from Stripe rather than
        # trusting the (partial) session payload.
        sub = stripe.Subscription.retrieve(subscription_id)
        update_user_subscription_from_stripe(sub)
    # First period: reset credits to plan allowance (no rollover).
    if uid and plan_key:
        reset_plan_credits(uid, plan_key)


def _handle_invoice_payment_succeeded(invoice):
    """Monthly renewal: sync subscription state and hard-reset credits to the
    plan's monthly quota (no rollover of unused credits)."""
    subscription_id = invoice.get("subscription")
    if not subscription_id:
        return
    sub = stripe.Subscription.retrieve(subscription_id)
    # uid/plan live on the *subscription* metadata (set at checkout time),
    # not on the invoice.
    metadata = sub.get("metadata") or {}
    uid = metadata.get("firebase_uid")
    plan_key = metadata.get("plan")
    logger.info(
        f"[STRIPE] invoice.payment_succeeded uid={uid}, plan={plan_key}, sub={subscription_id}"
    )
    update_user_subscription_from_stripe(sub)
    # New billing period: hard reset credits to monthly quota.
    if uid and plan_key:
        reset_plan_credits(uid, plan_key)


def _handle_invoice_payment_failed(invoice):
    """Failed payment: Stripe has moved the subscription to past_due/unpaid;
    mirror that status into Firebase. Credits are intentionally left alone."""
    subscription_id = invoice.get("subscription")
    if not subscription_id:
        return
    sub = stripe.Subscription.retrieve(subscription_id)
    logger.info(
        f"[STRIPE] invoice.payment_failed subscription={subscription_id}, status={sub.get('status')}"
    )
    # Status will now be past_due/unpaid; keep in sync.
    update_user_subscription_from_stripe(sub)
    # Optional: if you want to be harsh, you could also zero credits here.
    # metadata = sub.get("metadata") or {}
    # uid = metadata.get("firebase_uid")
    # if uid:
    #     db_ref.child(f'users/{uid}').update({"credits": 0.0})
    #     logger.info(f"[STRIPE] Payment failed – zeroed credits for user {uid}")


def _handle_subscription_change(event_type, sub):
    """Subscription deleted (cancelled) or updated: keep status/period/expiry
    in sync with Firebase. ``sub`` is the subscription object itself."""
    logger.info(
        f"[STRIPE] {event_type} id={sub.get('id')}, status={sub.get('status')}"
    )
    update_user_subscription_from_stripe(sub)


def stripe_webhook():
    """
    Handles Stripe webhook events:
    - checkout.session.completed -> initial subscription + first credits
    - invoice.payment_succeeded -> monthly renewals (reset credits)
    - invoice.payment_failed -> mark planStatus as past_due/unpaid
    - customer.subscription.deleted -> cancelled subscription
    - customer.subscription.updated -> keep status/expiry in sync

    Returns 200 for every verified event (even when internal handling fails)
    so Stripe does not endlessly retry delivery; 400 for unverifiable payloads
    and 500 when the webhook secret is missing from the environment.
    """
    if not STRIPE_WEBHOOK_SECRET:
        logger.error("[STRIPE] STRIPE_WEBHOOK_SECRET not set.")
        return jsonify({"error": "Webhook secret not configured."}), 500

    payload = request.data
    sig_header = request.headers.get("Stripe-Signature")
    try:
        # construct_event both parses the payload and verifies the signature,
        # so anything that passes here provably came from Stripe.
        event = stripe.Webhook.construct_event(
            payload, sig_header, STRIPE_WEBHOOK_SECRET
        )
    except stripe.error.SignatureVerificationError as e:
        logger.error(f"[STRIPE] Webhook signature verification failed: {e}")
        return "Invalid signature", 400
    except Exception as e:
        logger.error(f"[STRIPE] Webhook parsing error: {e}")
        return "Bad request", 400

    event_type = event.get("type")
    data_obj = event.get("data", {}).get("object", {})
    logger.info(f"[STRIPE] Webhook event: {event_type}")

    try:
        if event_type == "checkout.session.completed":
            _handle_checkout_completed(data_obj)
        elif event_type == "invoice.payment_succeeded":
            _handle_invoice_payment_succeeded(data_obj)
        elif event_type == "invoice.payment_failed":
            _handle_invoice_payment_failed(data_obj)
        elif event_type in (
            "customer.subscription.deleted",
            "customer.subscription.updated",
        ):
            _handle_subscription_change(event_type, data_obj)
    except Exception as e:
        # Deliberate broad catch: a handler bug must not turn into a Stripe
        # retry storm. The failure is logged and the event acknowledged.
        logger.error(f"[STRIPE] Error handling webhook {event_type}: {e}")
    return "", 200
# -----------------------------------------------------------------------------
# 7. MAIN EXECUTION
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # NOTE(security): this previously ran with debug=True unconditionally.
    # Flask's debug mode exposes the Werkzeug interactive debugger, which
    # allows arbitrary code execution if the app is reachable — never safe on
    # a deployed service. Debug is now opt-in via FLASK_DEBUG=1.
    debug_mode = os.environ.get("FLASK_DEBUG", "").strip().lower() in ("1", "true", "yes")
    # Bind on all interfaces; port comes from the platform (default 7860,
    # the Hugging Face Spaces convention).
    app.run(debug=debug_mode, host="0.0.0.0", port=int(os.environ.get("PORT", 7860)))