Spaces:
Sleeping
Sleeping
Update main.py
Browse files
main.py
CHANGED
|
@@ -21,6 +21,7 @@ from fpdf import FPDF
|
|
| 21 |
import tempfile
|
| 22 |
import urllib.parse
|
| 23 |
from stories import generateResponse
|
|
|
|
| 24 |
|
| 25 |
|
| 26 |
# Initialize Flask app and CORS
|
|
@@ -651,6 +652,366 @@ def generate_video_endpoint():
|
|
| 651 |
}), 500
|
| 652 |
|
| 653 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 654 |
#----------Image Editing Endpoint ----------
|
| 655 |
@app.route('/api/story/<string:story_id>/sections/<int:section_idx>/edit-image', methods=['POST'])
|
| 656 |
def edit_section_image_endpoint(story_id, section_idx):
|
|
|
|
| 21 |
import tempfile
|
| 22 |
import urllib.parse
|
| 23 |
from stories import generateResponse
|
| 24 |
+
from styled_video_gen import create_styled_video, DEFAULT_WIDTH, DEFAULT_HEIGHT, DEFAULT_FPS, DEFAULT_TRANSITION_DURATION, DEFAULT_FONT, DEFAULT_LOGO_PATH
|
| 25 |
|
| 26 |
|
| 27 |
# Initialize Flask app and CORS
|
|
|
|
| 652 |
}), 500
|
| 653 |
|
| 654 |
|
# NOTE(review): configures the ROOT logger at module import time, mid-file.
# basicConfig is a no-op if any handler was already attached earlier in the
# process — verify this actually takes effect, and consider a module-level
# `logger = logging.getLogger(__name__)` instead.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
| 656 |
+
|
| 657 |
+
|
| 658 |
+
# --- Styled Video Generation Endpoint ---
@app.route('/api/styled_video/generate', methods=['POST'])
def generate_styled_video_endpoint():
    """
    Generate a styled video for a story stored in Firebase.

    Expects a ``Bearer <token>`` Authorization header and a JSON body:
    ``{"story_id": str, "video_options": dict (optional)}``.

    Pipeline: authenticate -> check credits (cost: 10) -> fetch story
    sections -> download per-section image/audio assets -> optionally
    download a custom watermark logo -> render the video with
    create_styled_video() -> upload to Firebase Storage -> record the
    URL on the story -> deduct credits.

    Returns:
        200 JSON with ``video_url`` and ``new_credits`` on success;
        401/403/404/500 JSON with ``error`` (and a log URL) otherwise.

    All downloaded temp files and the rendered video are removed in the
    ``finally`` block regardless of outcome.
    """
    temp_files_to_clean = []   # every temp asset path goes here for cleanup
    video_output_path = None   # defined here so the finally block can see it

    try:
        logging.info("➡️ Received video generation request...")

        # --- Authentication & Authorization ---
        auth_header = request.headers.get('Authorization', '')
        if not auth_header.startswith('Bearer '):
            logging.error("❌ Auth Error: Missing or invalid token format")
            return jsonify({'error': 'Missing or invalid token', 'log_url': upload_log()}), 401

        token = auth_header.split(' ')[1]
        uid = verify_token(token)  # assumes verify_token returns UID or None — confirm against helper
        if not uid:
            logging.error("❌ Auth Error: Invalid or expired token")
            return jsonify({'error': 'Invalid or expired token', 'log_url': upload_log()}), 401
        logging.info(f"Authenticated user: {uid}")

        # --- Check User Credits ---
        user_ref = db.reference(f"users/{uid}")  # assumes this DB path structure
        user_data = user_ref.get() or {}         # user node may not exist yet
        current_credits = user_data.get("credits", 0)
        video_cost = 10  # credits charged per generated video

        if current_credits < video_cost:
            logging.warning(f"Insufficient credits for user {uid} (has {current_credits}, needs {video_cost})")
            return jsonify({'error': f'Insufficient credits. You need at least {video_cost} credits.', 'log_url': upload_log()}), 403

        # --- Get Request Data ---
        data = request.get_json()
        if not data:
            logging.error("❌ Request Error: Invalid or missing JSON payload")
            return jsonify({'error': 'Invalid JSON payload', 'log_url': upload_log()}), 400

        story_id = data.get('story_id')
        if not story_id:
            logging.error("❌ Request Error: story_id is required")
            return jsonify({'error': 'story_id is required', 'log_url': upload_log()}), 400

        # Video customization options from the client (empty dict -> all defaults)
        video_options = data.get('video_options', {})
        logging.info(f"Received video options: {video_options}")

        # --- Fetch Story Data ---
        logging.info(f"Fetching story '{story_id}' for user '{uid}' from Firebase...")
        # NOTE(review): stories are NOT namespaced by uid here — any authenticated
        # user can render any story_id. Confirm whether that is intended.
        story_ref = db.reference(f"stories/{story_id}")
        story_data = story_ref.get()

        if not story_data:
            logging.error(f"❌ Firebase Error: Story '{story_id}' not found.")
            return jsonify({'error': 'Story not found', 'log_url': upload_log()}), 404

        sections = story_data.get("sections", [])
        if not sections or not isinstance(sections, list):
            logging.error(f"❌ Data Error: No valid 'sections' array found in story '{story_id}'.")
            return jsonify({'error': 'No sections found in the story', 'log_url': upload_log()}), 404

        # --- Download Assets and Prepare Data ---
        image_pil_list = []         # downloaded PIL Images (one per valid section)
        audio_file_paths = []       # temp audio paths, or None -> silence
        section_texts_list = []     # subtitle text per valid section (may be None)
        valid_section_indices = []  # original indices of sections kept

        logging.info(f"Processing {len(sections)} sections for assets...")
        download_errors = False
        for i, section in enumerate(sections):
            if not isinstance(section, dict):
                logging.warning(f"Skipping section {i+1}, expected a dictionary, got {type(section)}")
                continue  # skip malformed section

            image_url = section.get("image_url")
            audio_url = section.get("audio_url")
            section_text = section.get("section_text")

            logging.info(f"--- Processing Section {i+1}/{len(sections)} ---")
            logging.info(f" Image URL: {image_url}")
            logging.info(f" Audio URL: {audio_url}")
            logging.info(f" Text: {str(section_text)[:50] if section_text else 'None'}")

            # Download image — required for a section to be kept at all.
            img_object = None
            if image_url:
                try:
                    img_resp = requests.get(image_url, timeout=30)
                    img_resp.raise_for_status()
                    img_object = Image.open(io.BytesIO(img_resp.content))
                    # Convert to RGB early to avoid palette/alpha issues downstream
                    img_object = img_object.convert("RGB")
                    logging.info(" ✅ Image downloaded and opened.")
                except requests.exceptions.RequestException as e:
                    logging.error(f" ❌ ERROR downloading image {image_url}: {e}")
                    download_errors = True
                except UnidentifiedImageError:
                    # assumes `from PIL import UnidentifiedImageError` at file top — TODO confirm
                    logging.error(f" ❌ ERROR: Cannot identify image file from {image_url}. Invalid format or corrupt?")
                    download_errors = True
                except Exception as e:
                    logging.error(f" ❌ ERROR processing image {image_url}: {e}")
                    download_errors = True
            else:
                logging.warning(f" ⚠️ No image_url for section {i+1}. Skipping section.")

            if img_object:
                image_pil_list.append(img_object)
                section_texts_list.append(section_text)  # may be None
                valid_section_indices.append(i)

                # Download audio — optional; silence is substituted on any failure.
                audio_path = None
                if audio_url:
                    try:
                        aud_resp = requests.get(audio_url, timeout=60)
                        aud_resp.raise_for_status()
                        temp_dir = tempfile.gettempdir()
                        # assumes mp3 content — TODO confirm content-type from source
                        aud_filename = f"story_{story_id}_sec_{i}_audio_{uuid.uuid4().hex}.mp3"
                        audio_path = os.path.join(temp_dir, aud_filename)
                        with open(audio_path, "wb") as f:
                            f.write(aud_resp.content)
                        temp_files_to_clean.append(audio_path)
                        logging.info(f" ✅ Audio downloaded to {audio_path}")
                    except requests.exceptions.RequestException as e:
                        # Audio failure is non-fatal: don't set download_errors
                        logging.error(f" ❌ ERROR downloading audio {audio_url}: {e}. Will use silence.")
                        audio_path = None
                    except Exception as e:
                        logging.error(f" ❌ ERROR saving audio {audio_url}: {e}. Will use silence.")
                        audio_path = None
                else:
                    logging.info(" No audio_url for this section. Will use silence.")

                audio_file_paths.append(audio_path)
            else:
                # Image failed: drop the whole section (keeps the three lists aligned)
                logging.warning(f"Skipping audio/text for section {i+1} due to image failure.")

        if not image_pil_list:
            logging.error("❌ ERROR: No valid images could be downloaded or processed for any section.")
            return jsonify({'error': 'No images available for video generation', 'log_url': upload_log()}), 500

        logging.info(f"Successfully processed {len(image_pil_list)} sections with images.")
        if download_errors:
            logging.warning("⚠️ Some assets encountered download/processing errors.")

        # --- Handle Custom Logo/Watermark Download ---
        custom_logo_path = None
        watermark_opts_from_client = video_options.get("watermark_options", {})
        watermark_final_config = watermark_opts_from_client.copy()
        # Disabled unless the logo downloads AND verifies as an image below.
        watermark_final_config["enabled"] = False

        custom_logo_url = watermark_opts_from_client.get("custom_logo_url")
        if watermark_opts_from_client.get("enabled") and custom_logo_url:
            logging.info(f"➡️ Downloading custom logo/watermark: {custom_logo_url}")
            try:
                logo_resp = requests.get(custom_logo_url, timeout=20)
                logo_resp.raise_for_status()
                # Preserve the URL's extension when possible; default to .png
                file_ext = os.path.splitext(urllib.parse.urlparse(custom_logo_url).path)[1] or '.png'
                custom_logo_filename = f"custom_logo_{uid}_{uuid.uuid4().hex}{file_ext}"
                custom_logo_path = os.path.join(tempfile.gettempdir(), custom_logo_filename)

                with open(custom_logo_path, "wb") as f:
                    f.write(logo_resp.content)
                try:
                    Image.open(custom_logo_path).verify()  # cheap integrity check
                    watermark_final_config["path"] = custom_logo_path
                    watermark_final_config["enabled"] = True
                    temp_files_to_clean.append(custom_logo_path)
                    logging.info(f"✅ Custom logo downloaded and verified: {custom_logo_path}")
                except Exception as img_err:
                    logging.error(f"❌ Custom logo file from {custom_logo_url} is not a valid image: {img_err}. Disabling watermark.")
                    # Remove the invalid download immediately (not in cleanup list yet)
                    if os.path.exists(custom_logo_path):
                        os.remove(custom_logo_path)

            except requests.exceptions.RequestException as e:
                logging.error(f"❌ ERROR downloading custom logo {custom_logo_url}: {e}. Watermark disabled.")
            except Exception as e:
                logging.error(f"❌ ERROR processing custom logo {custom_logo_url}: {e}. Watermark disabled.")
        elif watermark_opts_from_client.get("enabled"):
            logging.warning("⚠️ Watermark enabled in options, but no 'custom_logo_url' provided. Watermark disabled.")

        # --- Prepare Final Config for create_styled_video ---
        final_video_config = {
            "width": video_options.get("width", DEFAULT_WIDTH),
            "height": video_options.get("height", DEFAULT_HEIGHT),
            "fps": video_options.get("fps", DEFAULT_FPS),
            "transition": video_options.get("transition", "fade"),
            "transition_duration": video_options.get("transition_duration", DEFAULT_TRANSITION_DURATION),
            "font_path": video_options.get("font_path", DEFAULT_FONT),
            "subtitle_options": video_options.get("subtitle_options", {"enabled": True}),    # default: on
            "particle_options": video_options.get("particle_options", {"enabled": False}),   # default: off
            "watermark_options": watermark_final_config,
            # Only the default end-logo flow is supported; path/duration overridable.
            "end_logo_options": {
                "enabled": video_options.get("use_end_logo", True),
                "path": video_options.get("end_logo_path", DEFAULT_LOGO_PATH),
                "duration": video_options.get("end_logo_duration", 3.0)
            },
        }

        # Align the per-section particle types with the sections that survived
        # asset download (some may have been dropped above).
        particle_opts_config = final_video_config["particle_options"]
        if particle_opts_config.get("enabled"):
            particle_types_list_orig = particle_opts_config.get("types_per_section", [])
            if isinstance(particle_types_list_orig, list):
                filtered_particle_types = [particle_types_list_orig[i] for i in valid_section_indices if i < len(particle_types_list_orig)]
                # Pad with None when the client list was shorter than the section count
                if len(filtered_particle_types) < len(image_pil_list):
                    filtered_particle_types.extend([None] * (len(image_pil_list) - len(filtered_particle_types)))
                particle_opts_config["types_per_section"] = filtered_particle_types
                logging.info(f"Aligned particle types for {len(image_pil_list)} sections: {particle_opts_config['types_per_section']}")
            else:
                logging.warning("particle_options.types_per_section was not a list. Disabling particles.")
                particle_opts_config["enabled"] = False

        # --- Create the Video ---
        video_output_filename = f"final_video_{uid}_{story_id}_{uuid.uuid4().hex}.mp4"
        video_output_path = os.path.join(tempfile.gettempdir(), video_output_filename)
        # Not added to the cleanup list yet — only on failure, or after upload.

        logging.info("🎬 Starting video creation with MoviePy...")
        logging.info(f"Video Config Passed to create_video: {final_video_config}")
        logging.info(f"Output Path: {video_output_path}")
        logging.info(f"Number of image inputs: {len(image_pil_list)}")
        logging.info(f"Number of audio inputs: {len(audio_file_paths)}")
        logging.info(f"Number of text inputs: {len(section_texts_list)}")

        generated_video_path = create_styled_video(
            images=image_pil_list,            # list of PIL images
            audio_files=audio_file_paths,     # list of paths (or None)
            section_texts=section_texts_list, # list of strings (or None)
            output_path=video_output_path,
            config=final_video_config
        )

        # --- Handle Video Creation Result ---
        if not generated_video_path or not os.path.exists(generated_video_path):
            logging.error("❌ ERROR: Video generation failed (create_video returned None or file missing).")
            # A partial file may exist at the intended path — schedule it for cleanup
            temp_files_to_clean.append(video_output_path)
            return jsonify({'error': 'Video generation failed', 'log_url': upload_log()}), 500

        logging.info(f"✅ Video generated successfully: {generated_video_path}")
        # Local copy is no longer needed once uploaded
        temp_files_to_clean.append(generated_video_path)

        # --- Upload Video to Firebase Storage ---
        logging.info(f"☁️ Uploading video '{os.path.basename(generated_video_path)}' to Firebase Storage...")
        video_blob_name = f"stories/{uid}/{story_id}/video_{uuid.uuid4().hex}.mp4"
        try:
            video_url = upload_to_storage(generated_video_path, video_blob_name)
            if not video_url:
                raise Exception("Upload function returned no URL")
            logging.info(f"✅ Video uploaded successfully to: {video_url}")
        except Exception as upload_err:
            logging.error(f"❌ Firebase Storage Error: Failed to upload video: {upload_err}")
            # No credits are deducted when the upload fails
            return jsonify({'error': 'Video generated but failed to upload to storage.', 'log_url': upload_log()}), 500

        # --- Update Firebase Realtime Database ---
        try:
            story_ref.update({"video_url": video_url, "last_generated": time.time()})
            logging.info(f"✅ Updated story '{story_id}' record with video URL.")
        except Exception as db_err:
            logging.error(f"❌ Firebase DB Error: Failed to update story record: {db_err}")
            # Treated as fatal: the client reads the video URL from the story record.
            return jsonify({'error': 'Video generated and uploaded, but failed to update story record.', 'log_url': upload_log()}), 500

        # --- Deduct Credits (only after generation, upload, and DB update all succeeded) ---
        # Computed outside the try so the success response below can never hit
        # an unbound name if the DB write fails.
        new_credits = max(0, current_credits - video_cost)
        try:
            user_ref.update({"credits": new_credits})
            logging.info(f"✅ Deducted {video_cost} credits for user {uid}. New balance: {new_credits}")
        except Exception as credit_err:
            # Non-fatal: the video exists and is recorded; log and still return success.
            logging.error(f"❌ Firebase DB Error: Failed to update user credits: {credit_err}")

        # --- Success Response ---
        return jsonify({
            "message": "Video generated and uploaded successfully!",
            "video_url": video_url,
            "new_credits": new_credits,
            "log_url": "Log upload function placeholder"  # Replace with actual call if needed
        }), 200

    except Exception as e:
        # --- Generic Error Handler (boundary catch: log full trace, return 500) ---
        trace = traceback.format_exc()
        logging.error(f"❌ UNHANDLED EXCEPTION in generate_video_endpoint: {str(e)}\n{trace}")
        return jsonify({
            'error': f"An unexpected error occurred: {str(e)}",
            'log_url': "Log upload function placeholder"
        }), 500

    finally:
        # --- Cleanup Temporary Files (runs on every exit path) ---
        logging.info(f"🧹 Cleaning up {len(temp_files_to_clean)} temporary files...")
        cleaned_count = 0
        failed_count = 0
        for file_path in temp_files_to_clean:
            if file_path and os.path.exists(file_path):
                try:
                    os.remove(file_path)
                    cleaned_count += 1
                except Exception as e:
                    logging.error(f" - Failed to remove temp file {file_path}: {e}")
                    failed_count += 1
        logging.info(f"✅ Cleanup complete. Removed {cleaned_count} files, failed to remove {failed_count}.")
|
| 1015 |
#----------Image Editing Endpoint ----------
|
| 1016 |
@app.route('/api/story/<string:story_id>/sections/<int:section_idx>/edit-image', methods=['POST'])
|
| 1017 |
def edit_section_image_endpoint(story_id, section_idx):
|