Spaces:
Sleeping
Sleeping
File size: 81,015 Bytes
ac8b6fd f4e9c73 ac8b6fd f4e9c73 ac8b6fd 343240f df5bc04 ac8b6fd f4e9c73 571dfac 27077ed fbe84d9 343240f e8c58e3 703b008 5bfdda5 8ad64c2 5bfdda5 27077ed ac8b6fd b717354 27077ed 506a7e6 9d7c236 b717354 8f5bb15 9d7c236 506a7e6 ac8b6fd c73b0bd ac8b6fd f759364 ac8b6fd 04e1cca f759364 ac8b6fd 04e1cca ac8b6fd f759364 ac8b6fd c8fa2cf ac8b6fd f2a050e e15a876 ac8b6fd e15a876 ac8b6fd e15a876 ac8b6fd f2a050e e15a876 ac8b6fd e15a876 ac8b6fd e15a876 ac8b6fd e15a876 ac8b6fd c8fa2cf dae7069 27077ed 58b94ca c8fa2cf c73b0bd bacf5c7 c73b0bd c8fa2cf c73b0bd c8fa2cf c73b0bd c8fa2cf c73b0bd c8fa2cf 3cc4976 c73b0bd 5bfdda5 f759364 d2ae586 c73b0bd c8fa2cf ed4e2b3 f759364 5bfdda5 f759364 5bfdda5 c73b0bd 5bfdda5 c73b0bd ed4e2b3 0f02977 ed4e2b3 ab929c6 ed4e2b3 c73b0bd ed4e2b3 c73b0bd ed4e2b3 c73b0bd ed4e2b3 c73b0bd 5bfdda5 c73b0bd ed4e2b3 c73b0bd f759364 5bfdda5 f759364 5bfdda5 ed4e2b3 c73b0bd 5bfdda5 f759364 5bfdda5 f759364 bacf5c7 5bfdda5 bacf5c7 5bfdda5 0a3def3 5bfdda5 bacf5c7 5bfdda5 c73b0bd 5bfdda5 c73b0bd ed4e2b3 c73b0bd 5bfdda5 c73b0bd ed4e2b3 c73b0bd ed4e2b3 c73b0bd ed4e2b3 c73b0bd 5bfdda5 c73b0bd f759364 c73b0bd f759364 c73b0bd ed4e2b3 3cc4976 2cba343 c73b0bd c8fa2cf c73b0bd 2cba343 f759364 c8fa2cf c73b0bd c8fa2cf c73b0bd 27077ed 64f769f c73b0bd c8fa2cf 27077ed c8fa2cf 27077ed f5a2a59 64f769f c8fa2cf 27077ed f5a2a59 c8fa2cf 3cc4976 c73b0bd 27077ed f5a2a59 c73b0bd 27077ed c73b0bd 64f769f c73b0bd 27077ed f5a2a59 c73b0bd 27077ed f5a2a59 c73b0bd 27077ed 64f769f f5a2a59 c73b0bd 507b243 64f769f 27077ed 507b243 64f769f f5a2a59 64f769f 27077ed 64f769f 27077ed 507b243 f5a2a59 64f769f 27077ed c73b0bd 27077ed f5a2a59 c73b0bd f5a2a59 c73b0bd f5a2a59 fbe84d9 64f769f c73b0bd 27077ed f5a2a59 c73b0bd 27077ed 2cba343 f5a2a59 3cc4976 f5a2a59 c73b0bd c8fa2cf f5a2a59 c8fa2cf 8ad64c2 3402240 c8fa2cf 3402240 c8fa2cf 3402240 c8fa2cf 3402240 c8fa2cf 3402240 c8fa2cf 8cb2611 367f088 91b4722 8cb2611 367f088 ac8b6fd 16bf0af 8cb2611 343240f 16bf0af 2ca2fa8 d691d98 9f7d9b3 c09b0a8 9f7d9b3 c09b0a8 66b5d69 2500be7 
343240f ef2c475 2ca2fa8 ef2c475 2ca2fa8 ef2c475 f2f2fdc ef2c475 2ca2fa8 ef2c475 343240f ef2c475 2ca2fa8 ef2c475 2ca2fa8 ef2c475 16bf0af 3724c78 16bf0af 2ca2fa8 16bf0af c09b0a8 343240f c09b0a8 2ca2fa8 343240f 2ef53c5 bc8b6e7 2ef53c5 343240f 2ef53c5 343240f 2ef53c5 16bf0af 2ca2fa8 c09b0a8 16bf0af 343240f 2ef53c5 9f7d9b3 2ef53c5 9f7d9b3 16bf0af 1a3fc95 703b008 9f7d9b3 c09b0a8 2ef53c5 9f7d9b3 2ca2fa8 3724c78 2ca2fa8 3724c78 f2f2fdc 2ef53c5 3724c78 2ca2fa8 703b008 2ca2fa8 2ef53c5 343240f 16bf0af 343240f 2ca2fa8 343240f c09b0a8 2ca2fa8 16bf0af 343240f 2ca2fa8 343240f c09b0a8 16bf0af 1a3fc95 703b008 9f7d9b3 c09b0a8 9f7d9b3 3724c78 2ca2fa8 703b008 2ca2fa8 16bf0af c09b0a8 42fc946 16bf0af 1a3fc95 703b008 9f7d9b3 c09b0a8 9f7d9b3 3724c78 2ca2fa8 703b008 2ca2fa8 343240f 16bf0af 343240f 16bf0af 3724c78 16bf0af ef2c475 2ca2fa8 703b008 ef2c475 c09b0a8 eb6fbc0 ac8b6fd f2a050e ac8b6fd 8658c17 9ae2f34 8658c17 5a717c4 9ae2f34 fcdf3fc 5a717c4 fcdf3fc 5a717c4 8658c17 5a717c4 8658c17 58b94ca 8658c17 58b94ca 677caa8 ac8b6fd f759364 ac8b6fd |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 
528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 
1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 
1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 
1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 |
# Standard library
import base64  # used by the dataframe chart-decoding path in generate_story_endpoint
import io
import json
import logging
import os
import re
import tempfile
import time
import traceback
import urllib.parse
import uuid
import zipfile
from datetime import datetime

# Third-party
import firebase_admin
import pandas as pd
import requests
from firebase_admin import credentials, db, storage, auth
from flask import Flask, request, jsonify, send_file
from flask_cors import CORS, cross_origin
from fpdf import FPDF
from PIL import ImageFont, ImageDraw, Image

# Local modules
from video_gen import create_video
from stories import generateResponse
from styled_video_gen import create_styled_video, DEFAULT_WIDTH, DEFAULT_HEIGHT, DEFAULT_FPS, DEFAULT_TRANSITION_DURATION, DEFAULT_FONT, DEFAULT_LOGO_PATH
# Initialize Flask app and CORS
app = Flask(__name__)
CORS(app)
# Firebase initialization
Firebase_DB = os.getenv("Firebase_DB")
Firebase_Storage = os.getenv("Firebase_Storage")
LOG_FILE_PATH = "/tmp/video_gen.log"
try:
# Retrieve the JSON content from the secret
credentials_json_string = os.environ.get("FIREBASE")
if credentials_json_string:
# Parse the JSON string into a Python dictionary
credentials_json = json.loads(credentials_json_string)
# Initialize Firebase Admin SDK
cred = credentials.Certificate(credentials_json)
firebase_admin.initialize_app(cred, {
'databaseURL': f'{Firebase_DB}',
'storageBucket': f'{Firebase_Storage}'
})
print("Firebase Admin SDK initialized successfully.")
else:
print("FIREBASE secret not set.")
except Exception as e:
print(f"Error initializing Firebase: {e}")
bucket = storage.bucket()
# Helper: Upload a local file to Firebase Storage and return its public URL
def upload_to_storage(local_path, destination_blob_name):
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(local_path)
return blob.public_url
# Gemini API initialization
api_key = os.environ['Gemini']
def configure_gemini():
genai.configure(api_key=api_key)
return genai.GenerativeModel('gemini-2.0-flash-thinking-exp')
# Helper functions
def verify_token(token):
try:
decoded_token = auth.verify_id_token(token)
return decoded_token['uid']
except Exception as e:
return None
def verify_admin(auth_header):
if not auth_header or not auth_header.startswith('Bearer '):
raise ValueError('Invalid token')
token = auth_header.split(' ')[1]
uid = verify_token(token)
if not uid:
raise PermissionError('Invalid user')
user_ref = db.reference(f'users/{uid}')
user_data = user_ref.get()
if not user_data or not user_data.get('is_admin', False):
raise PermissionError('Admin access required')
return uid
# ---------- Dummy Admin Creation on Startup ----------
"""
def create_dummy_admin():
try:
# Try to get the user if it exists
admin_user = auth.get_user_by_email(admin_email)
except firebase_admin.auth.UserNotFoundError:
# Create the dummy admin if not found
admin_user = auth.create_user(email=admin_email, password=admin_password)
# Set or update admin record in the database
admin_ref = db.reference(f'users/{admin_user.uid}')
admin_data = admin_ref.get() or {}
if not admin_data.get('is_admin', False):
admin_ref.set({
'email': admin_email,
'credits': 9999, # Optionally, give admin lots of credits
'is_admin': True,
'created_at': datetime.utcnow().isoformat()
})
print(f"Dummy admin ready: {admin_email}")
"""
# ---------- Authentication Endpoints ----------
@app.route('/api/auth/signup', methods=['POST'])
def signup():
try:
data = request.get_json()
email = data.get('email')
password = data.get('password')
if not email or not password:
return jsonify({'error': 'Email and password are required'}), 400
# Create user in Firebase Auth
user = auth.create_user(email=email, password=password)
# Set initial user data in the realtime database with 3 starting credits
user_ref = db.reference(f'users/{user.uid}')
user_data = {
'email': email,
'credits': 30,
'is_admin': False,
'created_at': datetime.utcnow().isoformat()
}
user_ref.set(user_data)
return jsonify({
'success': True,
'user': {
'uid': user.uid,
**user_data
}
}), 201
except Exception as e:
return jsonify({'error': str(e)}), 400
# ---------- User Profile ----------
@app.route('/api/user/profile', methods=['GET'])
def get_user_profile():
try:
auth_header = request.headers.get('Authorization', '')
print("Received Auth Header (user):", auth_header) # Debugging
if not auth_header.startswith('Bearer '):
return jsonify({'error': 'Missing or invalid token'}), 401
token = auth_header.split(' ')[1]
uid = verify_token(token)
if not uid:
return jsonify({'error': 'Invalid or expired token'}), 401
user_data = db.reference(f'users/{uid}').get()
print("Fetched User Data (user):", user_data) # Debugging
if not user_data:
return jsonify({'error': 'User not found'}), 404
return jsonify({
'uid': uid,
'email': user_data.get('email'),
'credits': user_data.get('credits', 0),
'is_admin': user_data.get('is_admin', False)
})
except Exception as e:
print(f"Error fetching user profile: {str(e)}")
return jsonify({'error': str(e)}), 500
@app.route('/api/auth/google-signin', methods=['POST'])
def google_signin():
try:
auth_header = request.headers.get('Authorization', '')
if not auth_header.startswith('Bearer '):
return jsonify({'error': 'Missing or invalid token'}), 401
token = auth_header.split(' ')[1]
decoded_token = auth.verify_id_token(token) # Verify the token
uid = decoded_token['uid']
email = decoded_token.get('email')
# Check if user already exists in database
user_ref = db.reference(f'users/{uid}')
user_data = user_ref.get()
if not user_data:
# New user, create an entry in the database
user_data = {
'email': email,
'credits': 30, # Give new users initial credits
'is_admin': False,
'created_at': datetime.utcnow().isoformat(),
}
user_ref.set(user_data)
return jsonify({
'success': True,
'user': {
'uid': uid,
**user_data
}
}), 200
except Exception as e:
return jsonify({'error': str(e)}), 400
def upload_log():
"""Uploads the log file to Firebase Storage and returns the download URL."""
try:
log_blob_name = f"logs/{uuid.uuid4().hex}.log"
log_url = upload_to_storage(LOG_FILE_PATH, log_blob_name)
return log_url
except Exception as e:
logging.error(f"β ERROR: Failed to upload log file: {e}")
return None
@app.route('/api/feedback', methods=['POST'])
def submit_feedback():
"""
Allows a user to submit feedback, bug reports, or feature requests.
"""
try:
# --- Authentication ---
auth_header = request.headers.get('Authorization', '')
if not auth_header.startswith('Bearer '):
return jsonify({'error': 'Missing or invalid token'}), 401
token = auth_header.split(' ')[1]
uid = verify_token(token)
if not uid:
return jsonify({'error': 'Invalid or expired token'}), 401
# --- Parse Feedback Data ---
data = request.get_json()
feedback_type = data.get('type', 'general') # e.g. "bug", "feature_request", "general"
message = data.get('message')
if not message:
return jsonify({'error': 'message is required'}), 400
# Optionally, store some user info like email or name
user_data = db.reference(f'users/{uid}').get() or {}
user_email = user_data.get('email', 'unknown')
# Create a new feedback entry in "feedback" node
feedback_id = str(uuid.uuid4())
feedback_ref = db.reference(f'feedback/{feedback_id}')
feedback_record = {
"user_id": uid,
"user_email": user_email,
"type": feedback_type,
"message": message,
"created_at": datetime.utcnow().isoformat(),
"status": "open" # admin can mark "resolved" or "in progress"
}
feedback_ref.set(feedback_record)
return jsonify({"success": True, "feedback_id": feedback_id}), 201
except Exception as e:
return jsonify({'error': str(e)}), 500
# -----------------------
# Content
# -----------------------
# ---------- Story Generation Endpoint ----------
# Configure logging to write to a file
logging.basicConfig(filename=LOG_FILE_PATH, level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
@app.route('/api/story/generate', methods=['POST'])
def generate_story_endpoint():
    """Generate a five-section story with a per-section image and narration clip.

    Flow: authenticate -> require >= 5 credits -> build the story text from the
    requested input_type (text, pdf, wiki, bible, youtube, dataframe) -> split
    on "[break]" into exactly 5 sections -> generate + upload an image and an
    audio file per section -> persist the story record -> deduct 5 credits.

    Returns:
        JSON with story_id, full_story, preview, sections, generation_times,
        new_credits, and input_params; or an error payload with an HTTP status.
    """
    try:
        # --- Authentication ---
        auth_header = request.headers.get('Authorization', '')
        if not auth_header.startswith('Bearer '):
            return jsonify({'error': 'Missing or invalid token'}), 401
        token = auth_header.split(' ')[1]
        uid = verify_token(token)
        if not uid:
            return jsonify({'error': 'Invalid or expired token'}), 401
        # --- Check User Credits Before Generation ---
        user_ref = db.reference(f"users/{uid}")
        user_data = user_ref.get() or {}
        current_credits = user_data.get("credits", 0)
        if current_credits < 5:
            return jsonify({'error': 'Insufficient credits. You need at least 5 credits to generate a story.'}), 403
        # --- Read Request Data (multipart/form-data) ---
        data = request.form.to_dict()
        input_type = data.get('input_type', 'text')
        prompt = data.get('prompt')  # For "text" only
        story_type = data.get('story_type', 'free_form')
        style = data.get('style', 'whimsical')
        voice_model = data.get('voice_model', 'aura-asteria-en')
        image_model = data.get('image_model', 'hf')
        audio_model = data.get('audio_model', 'deepgram')
        if input_type not in ["text", "pdf", "wiki", "bible", "youtube", "dataframe"]:
            return jsonify({'error': 'Unsupported input_type'}), 400
        # Optional fields, relevant only for some input types
        wiki_url = data.get("wiki_url")
        bible_reference = data.get("bible_reference")
        youtube_url = data.get("youtube_url")
        ext = data.get("ext")  # For dataframe usage
        # Prepare for story generation
        from stories import (
            generate_story_from_text,
            get_pdf_text,
            get_df,
            generate_story_from_dataframe,
            generateResponse
        )
        story_gen_start = time.time()
        full_story = None
        df = None  # Parsed dataframe; reused below for chart generation.
        # 1) Generate the full story text
        if input_type == "text":
            if not prompt:
                return jsonify({'error': 'Prompt is required for text input'}), 400
            full_story = generate_story_from_text(prompt, story_type)
        elif input_type == "pdf":
            uploaded_file = request.files.get("file")
            if not uploaded_file:
                return jsonify({'error': 'No PDF file uploaded'}), 400
            pdf_text = get_pdf_text(uploaded_file)
            full_story = generate_story_from_text(pdf_text, story_type)
        elif input_type == "dataframe":
            uploaded_file = request.files.get("file")
            if not uploaded_file or not ext:
                return jsonify({'error': 'File and ext are required for dataframe input'}), 400
            df = get_df(uploaded_file, ext)
            if df is None:
                return jsonify({'error': f'Failed to read {ext} file'}), 400
            full_story = generate_story_from_dataframe(df, story_type)
        elif input_type == "wiki":
            if not wiki_url:
                return jsonify({'error': 'wiki_url is required for input_type "wiki"'}), 400
            from stories import generate_story_from_wiki
            full_story = generate_story_from_wiki(wiki_url, story_type)
        elif input_type == "bible":
            if not bible_reference:
                return jsonify({'error': 'bible_reference is required for input_type "bible"'}), 400
            from stories import generate_story_from_bible
            full_story = generate_story_from_bible(bible_reference, story_type)
        elif input_type == "youtube":
            if not youtube_url:
                return jsonify({'error': 'youtube_url is required for input_type "youtube"'}), 400
            from stories import generate_story_from_youtube
            full_story = generate_story_from_youtube(youtube_url, story_type)
        # Measure generation time
        story_gen_end = time.time()
        story_generation_time = story_gen_end - story_gen_start
        if not full_story:
            return jsonify({'error': 'Story generation failed'}), 500
        # 2) Split into exactly 5 sections (pad with placeholders / truncate)
        sections_raw = [s.strip() for s in full_story.split("[break]") if s.strip()]
        if len(sections_raw) < 5:
            sections_raw += ["(Placeholder section)"] * (5 - len(sections_raw))
        elif len(sections_raw) > 5:
            sections_raw = sections_raw[:5]
        sections = []
        image_generation_times = []
        audio_generation_times = []
        from image_gen import generate_image_with_retry
        from audio_gen import generate_audio
        # BUG FIX: the dataframe was previously re-read from request.files here,
        # but the upload stream had already been consumed by get_df() above, so
        # the second read could not succeed. Reuse the df parsed earlier instead.
        # 3) Process each section
        for section_text in sections_raw:
            # The <...> span inside a section is the image prompt; otherwise use
            # the first 100 characters of the section text.
            img_prompt_match = re.search(r"<(.*?)>", section_text)
            img_prompt = img_prompt_match.group(1).strip() if img_prompt_match else section_text[:100]
            image_start = time.time()
            image_obj = None
            # Attempt chart generation if dataframe
            if input_type == "dataframe" and df is not None:
                try:
                    chart_str = generateResponse(img_prompt, df)
                    logging.info(f"chart string: {chart_str}")
                    if chart_str and chart_str.startswith("data:image/png;base64,"):
                        base64_data = chart_str.split(",", 1)[1]
                        logging.info(f"base64 data: {base64_data}")
                        # BUG FIX: previously decoded `chart_bytes` before it was
                        # assigned (NameError); decode the extracted base64 payload.
                        chart_bytes = base64.b64decode(base64_data)
                        image_obj = Image.open(io.BytesIO(chart_bytes))
                        logging.info(f"Image: {image_obj}")
                except Exception as e:
                    logging.error(f"error {e}, DataFrame chart generation error")
                    print("DataFrame chart generation error:", e)
            # Fallback to generate_image_with_retry
            if not image_obj:
                image_obj, _ = generate_image_with_retry(img_prompt, style, model=image_model)
            image_end = time.time()
            image_generation_times.append(image_end - image_start)
            # Save & upload the image, then delete the temp file
            image_filename = f"/tmp/{uuid.uuid4().hex}.jpg"
            image_obj.save(image_filename, format="JPEG")
            image_blob_name = f"stories/{uid}/{uuid.uuid4().hex}.jpg"
            image_url = upload_to_storage(image_filename, image_blob_name)
            os.remove(image_filename)
            # Generate audio without the <image> description
            audio_text = re.sub(r"<.*?>", "", section_text)
            audio_start = time.time()
            audio_file_path = generate_audio(audio_text, voice_model, audio_model=audio_model)
            audio_end = time.time()
            audio_generation_times.append(audio_end - audio_start)
            audio_blob_name = f"stories/{uid}/{uuid.uuid4().hex}.mp3"
            audio_url = upload_to_storage(audio_file_path, audio_blob_name)
            os.remove(audio_file_path)
            sections.append({
                "section_text": section_text,
                "image_url": image_url,
                "audio_url": audio_url
            })
        # 4) Store the story
        story_id = str(uuid.uuid4())
        story_ref = db.reference(f"stories/{story_id}")
        # ---------- STORE INPUT PARAMS -----------
        input_params = {
            "input_type": input_type,
            "prompt": prompt,  # might be None if pdf/dataframe
            "wiki_url": wiki_url,  # might be None
            "bible_reference": bible_reference,
            "youtube_url": youtube_url,
            "story_type": story_type,
            "style": style,
            "voice_model": voice_model,
            "image_model": image_model,
            "audio_model": audio_model,
            "ext": ext  # for dataframe
        }
        # -----------------------------------------
        story_record = {
            "uid": uid,
            "full_story": full_story,
            "sections": sections,
            "generation_times": {
                "story_generation_time": story_generation_time,
                "image_generation_times": image_generation_times,
                "audio_generation_times": audio_generation_times
            },
            "created_at": datetime.utcnow().isoformat(),
            "input_type": input_type,
            "story_type": story_type,
            "input_params": input_params
        }
        story_ref.set(story_record)
        # Subtract 5 Credits
        new_credits = current_credits - 5
        user_ref.update({"credits": new_credits})
        preview = sections[0] if sections else {}
        return jsonify({
            "story_id": story_id,
            "full_story": full_story,
            "preview": preview,
            "sections": sections,
            "generation_times": story_record["generation_times"],
            "new_credits": new_credits,
            "input_params": input_params
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# ---------- Video Generation Endpoint ----------
# Configure logging to write to a file
logging.basicConfig(filename=LOG_FILE_PATH, level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
@app.route('/api/video/generate', methods=['POST'])
def generate_video_endpoint():
    """Assemble a story's section images and audio clips into an MP4.

    Flow: authenticate -> require >= 5 credits -> fetch the story record by
    story_id -> download each section's image and audio -> create_video() ->
    upload the MP4 to Storage, save its URL on the story, deduct 5 credits.

    Every error response includes a log_url pointing at the uploaded log file.
    """
    # BUG FIX: several logging string literals in this function were split across
    # two source lines by corrupted emoji bytes ("β" followed by a raw newline),
    # making them unterminated string literals; they are rejoined here into
    # single-line literals with the surviving bytes preserved.
    try:
        logging.info("β‘οΈ Received video generation request...")
        auth_header = request.headers.get('Authorization', '')
        if not auth_header.startswith('Bearer '):
            logging.error("β ERROR: Missing or invalid token")
            return jsonify({
                'error': 'Missing or invalid token',
                'log_url': upload_log()  # Upload log file so you can see the error
            }), 401
        token = auth_header.split(' ')[1]
        uid = verify_token(token)
        if not uid:
            logging.error("β ERROR: Invalid or expired token")
            return jsonify({
                'error': 'Invalid or expired token',
                'log_url': upload_log()
            }), 401
        user_ref = db.reference(f"users/{uid}")
        user_data = user_ref.get() or {}
        current_credits = user_data.get("credits", 0)
        if current_credits < 5:
            return jsonify({'error': 'Insufficient credits. You need at least 5 credits to generate a video.'}), 403
        data = request.get_json()
        story_id = data.get('story_id')
        if not story_id:
            logging.error("β ERROR: story_id is required")
            return jsonify({
                'error': 'story_id is required',
                'log_url': upload_log()
            }), 400
        logging.info(f"Fetching story {story_id} from Firebase...")
        story_ref = db.reference(f"stories/{story_id}")
        story_data = story_ref.get()
        if not story_data:
            logging.error("β ERROR: Story not found")
            return jsonify({
                'error': 'Story not found',
                'log_url': upload_log()
            }), 404
        sections = story_data.get("sections", [])
        if not sections:
            logging.error("β ERROR: No sections found in the story")
            return jsonify({
                'error': 'No sections found in the story',
                'log_url': upload_log()
            }), 404
        image_files = []
        audio_files = []
        logging.info(f"Processing {len(sections)} sections...")
        # Download each image and audio file; failures are logged and skipped
        # so a single bad asset does not abort the whole video.
        for section in sections:
            image_url = section.get("image_url")
            audio_url = section.get("audio_url")
            logging.info(f"β‘οΈ Downloading image from: {image_url}")
            img_resp = requests.get(image_url)
            if img_resp.status_code == 200:
                img = Image.open(io.BytesIO(img_resp.content))
                image_files.append(img)
                logging.info("β Image downloaded successfully.")
            else:
                logging.error(f"β ERROR: Failed to download image {image_url}")
            logging.info(f"β‘οΈ Downloading audio from: {audio_url}")
            aud_resp = requests.get(audio_url)
            if aud_resp.status_code == 200:
                aud_path = f"/tmp/{uuid.uuid4().hex}.mp3"
                with open(aud_path, "wb") as f:
                    f.write(aud_resp.content)
                audio_files.append(aud_path)
                logging.info("β Audio downloaded successfully.")
            else:
                logging.error(f"β ERROR: Failed to download audio {audio_url}")
        if not image_files:
            logging.error("β ERROR: No valid images found")
            return jsonify({
                'error': 'No images available for video generation',
                'log_url': upload_log()
            }), 500
        # Create the video
        video_output_path = f"/tmp/{uuid.uuid4().hex}.mp4"
        logging.info("Starting create_video...")
        video_file = create_video(image_files, audio_files, output_path=video_output_path)
        if not video_file:
            logging.error("β ERROR: Video generation failed")
            return jsonify({
                'error': 'Video generation failed',
                'log_url': upload_log()
            }), 500
        logging.info(f"β Video generated successfully: {video_file}")
        # Upload the video to Firebase Storage
        logging.info("Uploading video to Firebase Storage...")
        video_blob_name = f"stories/{uid}/{uuid.uuid4().hex}.mp4"
        video_url = upload_to_storage(video_file, video_blob_name)
        logging.info(f"β Video uploaded to {video_url}")
        # Update the story record with the video URL
        story_ref.update({"video_url": video_url})
        # Deduct 5 credits (clamped at zero)
        new_credits = max(0, current_credits - 5)
        user_ref.update({"credits": new_credits})
        logging.info(f"β Deducted 5 credits. New credit balance: {new_credits}")
        return jsonify({
            "video_url": video_url,
            "new_credits": new_credits,
            "log_url": upload_log()  # Upload the final log so you can inspect it
        })
    except Exception as e:
        trace = traceback.format_exc()
        logging.error(f"β EXCEPTION: {str(e)}\n{trace}")
        return jsonify({
            'error': str(e),
            'log_url': upload_log()
        }), 500
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# --- Styled Video Generation Endpoint ---
@app.route('/api/styled_video/generate', methods=['POST']) # Uncomment when deploying in Flask app
def generate_styled_video_endpoint(): # Use this if testing standalone
    """
    Generates a video based on story data from Firebase, applying client-specified options.

    Flow: authenticate (Bearer token) -> check credits (cost: 10) -> fetch story
    -> download per-section image/audio assets -> assemble a config dict ->
    render via create_styled_video -> upload to Firebase Storage -> write the
    video URL back to the story record -> deduct credits.

    Every temporary file created along the way is appended to
    temp_files_to_clean and removed in the finally block, regardless of outcome.
    """
    # --- Temporary file list for cleanup ---
    temp_files_to_clean = []
    video_output_path = None # Define here for broader scope in finally block
    try:
        logging.info("β‘οΈ Received video generation request...")
        # --- Authentication & Authorization ---
        auth_header = request.headers.get('Authorization', '')
        if not auth_header.startswith('Bearer '):
            logging.error("β Auth Error: Missing or invalid token format")
            return jsonify({'error': 'Missing or invalid token', 'log_url': upload_log()}), 401
        token = auth_header.split(' ')[1]
        uid = verify_token(token) # Assumes verify_token returns UID or None
        if not uid:
            logging.error("β Auth Error: Invalid or expired token")
            return jsonify({'error': 'Invalid or expired token', 'log_url': upload_log()}), 401
        logging.info(f"Authenticated user: {uid}")
        # --- Check User Credits ---
        user_ref = db.reference(f"users/{uid}") # Assumes this DB path structure
        user_data = user_ref.get()
        if not user_data: user_data = {} # Handle case where user node might not exist yet
        current_credits = user_data.get("credits", 0)
        video_cost = 10 # Define video cost
        if current_credits < video_cost:
            logging.warning(f"Insufficient credits for user {uid} (has {current_credits}, needs {video_cost})")
            return jsonify({'error': f'Insufficient credits. You need at least {video_cost} credits.', 'log_url': upload_log()}), 403
        # --- Get Request Data ---
        data = request.get_json()
        if not data:
            logging.error("β Request Error: Invalid or missing JSON payload")
            return jsonify({'error': 'Invalid JSON payload', 'log_url': upload_log()}), 400
        story_id = data.get('story_id')
        if not story_id:
            logging.error("β Request Error: story_id is required")
            return jsonify({'error': 'story_id is required', 'log_url': upload_log()}), 400
        # Get video customization options from the request (or use empty dict for defaults)
        video_options = data.get('video_options', {})
        logging.info(f"Received video options: {video_options}")
        # --- Fetch Story Data ---
        logging.info(f"Fetching story '{story_id}' for user '{uid}' from Firebase...")
        # Consider structuring story data under UID: db.reference(f"stories/{uid}/{story_id}")
        # Using the path from your original code for now:
        # NOTE(review): ownership of the story is not verified here (other
        # endpoints in this file check story_data.get('uid') == uid) — confirm
        # whether any authenticated user may render any story.
        story_ref = db.reference(f"stories/{story_id}")
        story_data = story_ref.get()
        if not story_data:
            logging.error(f"β Firebase Error: Story '{story_id}' not found.")
            return jsonify({'error': 'Story not found', 'log_url': upload_log()}), 404
        sections = story_data.get("sections", [])
        if not sections or not isinstance(sections, list):
            logging.error(f"β Data Error: No valid 'sections' array found in story '{story_id}'.")
            return jsonify({'error': 'No sections found in the story', 'log_url': upload_log()}), 404
        # --- Download Assets and Prepare Data ---
        image_pil_list = [] # Stores downloaded PIL Images
        audio_file_paths = [] # Stores paths to downloaded audio files
        section_texts_list = [] # Stores text for each section
        valid_section_indices = [] # Keep track of sections processed successfully
        logging.info(f"Processing {len(sections)} sections for assets...")
        download_errors = False
        for i, section in enumerate(sections):
            if not isinstance(section, dict):
                logging.warning(f"Skipping section {i+1}, expected a dictionary, got {type(section)}")
                continue # Skip malformed section
            image_url = section.get("image_url")
            audio_url = section.get("audio_url")
            section_text = section.get("section_text") # Get the text
            logging.info(f"--- Processing Section {i+1}/{len(sections)} ---")
            logging.info(f" Image URL: {image_url}")
            logging.info(f" Audio URL: {audio_url}")
            logging.info(f" Text: {str(section_text)[:50] if section_text else 'None'}")
            # Download Image (Required for a section to be valid)
            img_object = None
            if image_url:
                try:
                    img_resp = requests.get(image_url, timeout=30)
                    img_resp.raise_for_status()
                    img_object = Image.open(io.BytesIO(img_resp.content))
                    # Convert to RGB early to prevent potential palette issues later
                    img_object = img_object.convert("RGB")
                    logging.info(" β Image downloaded and opened.")
                except requests.exceptions.RequestException as e:
                    logging.error(f" β ERROR downloading image {image_url}: {e}")
                    download_errors = True
                except UnidentifiedImageError: # Catch PIL errors
                    logging.error(f" β ERROR: Cannot identify image file from {image_url}. Invalid format or corrupt?")
                    download_errors = True
                except Exception as e:
                    logging.error(f" β ERROR processing image {image_url}: {e}")
                    download_errors = True
            else:
                logging.warning(f" β οΈ No image_url for section {i+1}. Skipping section.")
                # Don't add placeholders if image fails, just skip the index
            # If image succeeded, process audio and text for this section index
            if img_object:
                image_pil_list.append(img_object)
                section_texts_list.append(section_text) # Add text (can be None)
                valid_section_indices.append(i) # Mark this index as valid
                # Download Audio (Optional, will use silence if fails)
                audio_path = None
                if audio_url:
                    try:
                        aud_resp = requests.get(audio_url, timeout=60)
                        aud_resp.raise_for_status()
                        # Use a descriptive temp file name in system's temp dir
                        temp_dir = tempfile.gettempdir()
                        aud_filename = f"story_{story_id}_sec_{i}_audio_{uuid.uuid4().hex}.mp3" # Assume mp3, adjust if needed
                        audio_path = os.path.join(temp_dir, aud_filename)
                        with open(audio_path, "wb") as f:
                            f.write(aud_resp.content)
                        temp_files_to_clean.append(audio_path) # Add to cleanup list
                        logging.info(f" β Audio downloaded to {audio_path}")
                    except requests.exceptions.RequestException as e:
                        logging.error(f" β ERROR downloading audio {audio_url}: {e}. Will use silence.")
                        # download_errors = True # Don't mark as overall error if only audio fails
                        audio_path = None # Ensure path is None on failure
                    except Exception as e:
                        logging.error(f" β ERROR saving audio {audio_url}: {e}. Will use silence.")
                        # download_errors = True
                        audio_path = None
                else:
                    logging.info(" No audio_url for this section. Will use silence.")
                audio_file_paths.append(audio_path) # Add path or None
            else: # Image failed, so skip adding audio/text for this section index
                logging.warning(f"Skipping audio/text for section {i+1} due to image failure.")
        # Check if any valid sections remain
        if not image_pil_list:
            logging.error("β ERROR: No valid images could be downloaded or processed for any section.")
            # upload_log() # Upload log before returning
            return jsonify({'error': 'No images available for video generation', 'log_url': upload_log()}), 500
        logging.info(f"Successfully processed {len(image_pil_list)} sections with images.")
        if download_errors:
            logging.warning("β οΈ Some assets encountered download/processing errors.")
        # --- Handle Custom Logo/Watermark Download ---
        custom_logo_path = None # Path to downloaded custom logo
        watermark_opts_from_client = video_options.get("watermark_options", {})
        watermark_final_config = watermark_opts_from_client.copy() # Start with client options
        watermark_final_config["enabled"] = False # Default to disabled unless successfully set up
        custom_logo_url = watermark_opts_from_client.get("custom_logo_url")
        if watermark_opts_from_client.get("enabled") and custom_logo_url:
            logging.info(f"β‘οΈ Downloading custom logo/watermark: {custom_logo_url}")
            try:
                logo_resp = requests.get(custom_logo_url, timeout=20)
                logo_resp.raise_for_status()
                # Save custom logo to a temp file, try to get extension
                file_ext = os.path.splitext(urllib.parse.urlparse(custom_logo_url).path)[1] or '.png' # Default to png
                custom_logo_filename = f"custom_logo_{uid}_{uuid.uuid4().hex}{file_ext}"
                custom_logo_path = os.path.join(tempfile.gettempdir(), custom_logo_filename)
                with open(custom_logo_path, "wb") as f:
                    f.write(logo_resp.content)
                # Verify it's a valid image
                try:
                    Image.open(custom_logo_path).verify() # Quick check
                    watermark_final_config["path"] = custom_logo_path # Update config with temp path
                    watermark_final_config["enabled"] = True # Enable watermark
                    temp_files_to_clean.append(custom_logo_path)
                    logging.info(f"β Custom logo downloaded and verified: {custom_logo_path}")
                except Exception as img_err:
                    logging.error(f"β Custom logo file from {custom_logo_url} is not a valid image: {img_err}. Disabling watermark.")
                    # Clean up invalid downloaded file immediately
                    if os.path.exists(custom_logo_path): os.remove(custom_logo_path)
            except requests.exceptions.RequestException as e:
                logging.error(f"β ERROR downloading custom logo {custom_logo_url}: {e}. Watermark disabled.")
            except Exception as e:
                logging.error(f"β ERROR processing custom logo {custom_logo_url}: {e}. Watermark disabled.")
        elif watermark_opts_from_client.get("enabled"):
            logging.warning("β οΈ Watermark enabled in options, but no 'custom_logo_url' provided. Watermark disabled.")
        # --- Prepare Final Config for create_video ---
        final_video_config = {
            "width": video_options.get("width", DEFAULT_WIDTH),
            "height": video_options.get("height", DEFAULT_HEIGHT),
            "fps": video_options.get("fps", DEFAULT_FPS),
            "transition": video_options.get("transition", "fade"),
            "transition_duration": video_options.get("transition_duration", DEFAULT_TRANSITION_DURATION),
            "font_path": video_options.get("font_path", DEFAULT_FONT), # Allow overriding default font
            "subtitle_options": video_options.get("subtitle_options", {"enabled": True}), # Default enabled
            "particle_options": video_options.get("particle_options", {"enabled": False}), # Default disabled
            "watermark_options": watermark_final_config, # Use the processed watermark config
            # Use default Sozo logo unless custom one provided? (Simplifying: only supporting default end logo for now)
            "end_logo_options": {
                "enabled": video_options.get("use_end_logo", True), # Control if end logo is used
                "path": video_options.get("end_logo_path", DEFAULT_LOGO_PATH), # Allow overriding default logo path via options
                "duration": video_options.get("end_logo_duration", 3.0)
            },
        }
        # Ensure particle types list matches the number of *final* valid sections
        particle_opts_config = final_video_config["particle_options"]
        if particle_opts_config.get("enabled"):
            particle_types_list_orig = particle_opts_config.get("types_per_section", [])
            if isinstance(particle_types_list_orig, list):
                # Filter the original particle list based on the indices of sections that were successfully processed
                filtered_particle_types = [particle_types_list_orig[i] for i in valid_section_indices if i < len(particle_types_list_orig)]
                # Pad with None if the original list was too short
                if len(filtered_particle_types) < len(image_pil_list):
                    filtered_particle_types.extend([None] * (len(image_pil_list) - len(filtered_particle_types)))
                particle_opts_config["types_per_section"] = filtered_particle_types
                logging.info(f"Aligned particle types for {len(image_pil_list)} sections: {particle_opts_config['types_per_section']}")
            else:
                logging.warning("particle_options.types_per_section was not a list. Disabling particles.")
                particle_opts_config["enabled"] = False
        # --- Create the Video ---
        # Define output path in temp directory
        video_output_filename = f"final_video_{uid}_{story_id}_{uuid.uuid4().hex}.mp4"
        video_output_path = os.path.join(tempfile.gettempdir(), video_output_filename)
        # Don't add to cleanup list immediately - only if creation fails or after successful upload
        logging.info("π Starting video creation with MoviePy...")
        logging.info(f"Video Config Passed to create_video: {final_video_config}")
        logging.info(f"Output Path: {video_output_path}")
        logging.info(f"Number of image inputs: {len(image_pil_list)}")
        logging.info(f"Number of audio inputs: {len(audio_file_paths)}")
        logging.info(f"Number of text inputs: {len(section_texts_list)}")
        # Call the MoviePy function from video_gen.py
        generated_video_path = create_styled_video(
            images=image_pil_list, # List of PIL images
            audio_files=audio_file_paths, # List of paths (or None)
            section_texts=section_texts_list,# List of strings (or None)
            output_path=video_output_path,
            config=final_video_config # The dictionary of options
        )
        # --- Handle Video Creation Result ---
        if not generated_video_path or not os.path.exists(generated_video_path):
            logging.error("β ERROR: Video generation failed (create_video returned None or file missing).")
            # Add the intended output path to cleanup just in case a partial file exists
            temp_files_to_clean.append(video_output_path)
            # upload_log() # Upload log before returning
            return jsonify({'error': 'Video generation failed', 'log_url': upload_log()}), 500
        logging.info(f"β Video generated successfully: {generated_video_path}")
        # Add the successfully generated video path for cleanup after upload
        temp_files_to_clean.append(generated_video_path)
        # --- Upload Video to Firebase Storage ---
        logging.info(f"βοΈ Uploading video '{os.path.basename(generated_video_path)}' to Firebase Storage...")
        # Make blob name more descriptive
        video_blob_name = f"stories/{uid}/{story_id}/video_{uuid.uuid4().hex}.mp4"
        try:
            # Assuming upload_to_storage handles the upload and returns public URL
            video_url = upload_to_storage(generated_video_path, video_blob_name)
            if not video_url:
                raise Exception("Upload function returned no URL")
            logging.info(f"β Video uploaded successfully to: {video_url}")
        except Exception as upload_err:
            logging.error(f"β Firebase Storage Error: Failed to upload video: {upload_err}")
            # upload_log() # Upload log before returning
            # Don't deduct credits if upload fails
            return jsonify({'error': 'Video generated but failed to upload to storage.', 'log_url': upload_log()}), 500
        # --- Update Firebase Realtime Database ---
        try:
            story_ref.update({"video_url": video_url, "last_generated": time.time()}) # Add timestamp
            logging.info(f"β Updated story '{story_id}' record with video URL.")
        except Exception as db_err:
            logging.error(f"β Firebase DB Error: Failed to update story record: {db_err}")
            # Decide if this is critical. Maybe log and continue, but don't deduct credits?
            # For now, let's return an error as the client won't see the video URL in the story record.
            # upload_log()
            return jsonify({'error': 'Video generated and uploaded, but failed to update story record.', 'log_url': upload_log()}), 500
        # --- Deduct Credits (Only after successful generation, upload, and DB update) ---
        try:
            new_credits = max(0, current_credits - video_cost)
            user_ref.update({"credits": new_credits})
            logging.info(f"β Deducted {video_cost} credits for user {uid}. New balance: {new_credits}")
        except Exception as credit_err:
            logging.error(f"β Firebase DB Error: Failed to update user credits: {credit_err}")
            # This is less critical, log it but still return success to user
            # NOTE(review): if max() itself raised (non-numeric credits), new_credits
            # would be unbound and the response below would NameError — confirm
            # 'credits' is always stored as a number.
            # upload_log() # Upload log
        # --- Success Response ---
        # final_log_url = upload_log()
        return jsonify({
            "message": "Video generated and uploaded successfully!",
            "video_url": video_url,
            "new_credits": new_credits,
            # "log_url": final_log_url
            "log_url": "Log upload function placeholder" # Replace with actual call if needed
        }), 200 # Use 200 OK for success
    except Exception as e:
        # --- Generic Error Handler ---
        trace = traceback.format_exc()
        logging.error(f"β UNHANDLED EXCEPTION in generate_video_endpoint: {str(e)}\n{trace}")
        # log_url = upload_log() # Upload log before returning
        return jsonify({
            'error': f"An unexpected error occurred: {str(e)}",
            # 'log_url': log_url
            'log_url': "Log upload function placeholder"
        }), 500
    finally:
        # --- Cleanup Temporary Files ---
        # Runs on every exit path (success, handled error, or exception).
        logging.info(f"π§Ή Cleaning up {len(temp_files_to_clean)} temporary files...")
        cleaned_count = 0
        failed_count = 0
        for file_path in temp_files_to_clean:
            if file_path and os.path.exists(file_path):
                try:
                    os.remove(file_path)
                    # logging.debug(f" - Removed: {file_path}")
                    cleaned_count += 1
                except Exception as e:
                    logging.error(f" - Failed to remove temp file {file_path}: {e}")
                    failed_count += 1
            #else:
            # logging.debug(f" - Skipping non-existent path: {file_path}")
        logging.info(f"β Cleanup complete. Removed {cleaned_count} files, failed to remove {failed_count}.")
#----------Image Editing Endpoint ----------
@app.route('/api/story/<string:story_id>/sections/<int:section_idx>/edit-image', methods=['POST'])
def edit_section_image_endpoint(story_id, section_idx):
    """
    Re-generate the image of one story section from a Gemini prompt.

    Auth: Bearer token in the Authorization header; the story must belong
    to the caller. Cost: 2 credits, deducted only after the new image has
    been stored and the story record updated.

    JSON body: {"gemini_prompt": "<prompt>"}
    Returns: {'success', 'new_image_url', 'new_credits'} on 200.
    """
    try:
        # 1) Auth
        auth_header = request.headers.get('Authorization', '')
        if not auth_header.startswith('Bearer '):
            return jsonify({'error': 'Missing or invalid token'}), 401
        token = auth_header.split(' ')[1]
        uid = verify_token(token)
        if not uid:
            return jsonify({'error': 'Invalid or expired token'}), 401
        # 2) Fetch story and verify ownership
        story_ref = db.reference(f"stories/{story_id}")
        story_data = story_ref.get()
        if not story_data:
            return jsonify({'error': 'Story not found'}), 404
        if story_data.get('uid') != uid:
            return jsonify({'error': 'Unauthorized'}), 403
        sections = story_data.get("sections", [])
        if section_idx < 0 or section_idx >= len(sections):
            return jsonify({'error': 'Invalid section index'}), 400
        # 3) Check user credits
        user_ref = db.reference(f"users/{uid}")
        user_data = user_ref.get() or {}
        current_credits = user_data.get("credits", 0)
        if current_credits < 2:
            return jsonify({'error': 'Not enough credits to edit image. Need 2 credits.'}), 403
        # 4) Read gemini_prompt
        # FIX: get_json() returns None for a missing/non-JSON body, which made
        # data.get(...) raise AttributeError (a 500). Guard so we answer 400.
        data = request.get_json(silent=True) or {}
        gemini_prompt = data.get('gemini_prompt')
        if not gemini_prompt:
            return jsonify({'error': 'gemini_prompt is required'}), 400
        # 5) Edit the image
        from image_gen import edit_section_image
        old_image_url = sections[section_idx].get("image_url")
        if not old_image_url:
            return jsonify({'error': 'No existing image in this section'}), 400
        edited_image_obj = edit_section_image(old_image_url, gemini_prompt)
        if not edited_image_obj:
            return jsonify({'error': 'Failed to edit image'}), 500
        # 6) Upload new image
        # FIX: use the platform temp dir instead of hard-coded /tmp, and always
        # remove the temp file even if the upload raises (it used to leak).
        new_filename = os.path.join(tempfile.gettempdir(), f"{uuid.uuid4().hex}.jpg")
        try:
            edited_image_obj.save(new_filename, format="JPEG")
            new_blob_name = f"stories/{uid}/{uuid.uuid4().hex}.jpg"
            new_image_url = upload_to_storage(new_filename, new_blob_name)
        finally:
            if os.path.exists(new_filename):
                os.remove(new_filename)
        # 7) Update the story record
        sections[section_idx]["image_url"] = new_image_url
        story_ref.update({"sections": sections})
        # 8) Subtract 2 credits
        new_credits = current_credits - 2
        user_ref.update({"credits": new_credits})
        return jsonify({
            'success': True,
            'new_image_url': new_image_url,
            'new_credits': new_credits
        }), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
#----------Text & Audio Editing Endpoint ----------
@app.route('/api/story/<string:story_id>/sections/<int:section_idx>/edit-text', methods=['POST'])
def edit_section_text_endpoint(story_id, section_idx):
    """
    Replace the text of one story section and regenerate its narration audio.

    Auth: Bearer token; the story must belong to the caller. Cost: 2 credits,
    deducted only after the new text/audio are stored.

    JSON body: {"new_text": "...", "voice_model"?: str, "audio_model"?: str}
    Returns: {'success', 'updated_text', 'new_audio_url', 'new_credits'} on 200.
    """
    try:
        # 1) Auth
        auth_header = request.headers.get('Authorization', '')
        if not auth_header.startswith('Bearer '):
            return jsonify({'error': 'Missing or invalid token'}), 401
        token = auth_header.split(' ')[1]
        uid = verify_token(token)
        if not uid:
            return jsonify({'error': 'Invalid or expired token'}), 401
        # 2) Fetch story and verify ownership
        story_ref = db.reference(f"stories/{story_id}")
        story_data = story_ref.get()
        if not story_data:
            return jsonify({'error': 'Story not found'}), 404
        if story_data.get('uid') != uid:
            return jsonify({'error': 'Unauthorized'}), 403
        sections = story_data.get("sections", [])
        if section_idx < 0 or section_idx >= len(sections):
            return jsonify({'error': 'Invalid section index'}), 400
        # 3) Check user credits
        user_ref = db.reference(f"users/{uid}")
        user_data = user_ref.get() or {}
        current_credits = user_data.get("credits", 0)
        if current_credits < 2:
            return jsonify({'error': 'Not enough credits to edit text. Need 2 credits.'}), 403
        # 4) Read new_text
        # FIX: get_json() returns None for a missing/non-JSON body, which made
        # data.get(...) raise AttributeError (a 500). Guard so we answer 400.
        data = request.get_json(silent=True) or {}
        new_text = data.get('new_text')
        if not new_text:
            return jsonify({'error': 'new_text is required'}), 400
        voice_model = data.get('voice_model', 'aura-asteria-en')
        audio_model = data.get('audio_model', 'deepgram')
        from audio_gen import edit_section_text
        old_section_text = sections[section_idx].get("section_text", "")
        # 5) Edit text -> generate new audio
        updated_text, new_audio_path = edit_section_text(
            old_section_text, new_text,
            voice_model=voice_model,
            audio_model=audio_model
        )
        if not updated_text or not new_audio_path:
            return jsonify({'error': 'Failed to edit text/audio'}), 500
        # 6) Upload new audio
        # FIX: always remove the generated audio file, even if the upload
        # raises (it used to leak on failure).
        try:
            new_blob_name = f"stories/{uid}/{uuid.uuid4().hex}.mp3"
            new_audio_url = upload_to_storage(new_audio_path, new_blob_name)
        finally:
            if os.path.exists(new_audio_path):
                os.remove(new_audio_path)
        # 7) Update the story record
        sections[section_idx]["section_text"] = updated_text
        sections[section_idx]["audio_url"] = new_audio_url
        story_ref.update({"sections": sections})
        # 8) Subtract 2 credits
        new_credits = current_credits - 2
        user_ref.update({"credits": new_credits})
        return jsonify({
            'success': True,
            'updated_text': updated_text,
            'new_audio_url': new_audio_url,
            'new_credits': new_credits
        }), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
#----------View projects and videos Endpoints ----------
@app.route('/api/view/projects', methods=['GET'])
def view_projects():
    """Return every story project owned by the authenticated user, newest first."""
    try:
        # --- Authentication ---
        auth_header = request.headers.get('Authorization', '')
        if not auth_header.startswith('Bearer '):
            return jsonify({'error': 'Missing or invalid token'}), 401
        uid = verify_token(auth_header.split(' ')[1])
        if not uid:
            return jsonify({'error': 'Invalid or expired token'}), 401
        # --- Collect the caller's stories, projecting only the public fields ---
        all_stories = db.reference('stories').get() or {}
        user_stories = {
            sid: {
                "story_id": sid,
                "full_story": record.get("full_story", ""),
                "sections": record.get("sections", []),
                "generation_times": record.get("generation_times", {}),
                "created_at": record.get("created_at", ""),
                "input_type": record.get("input_type", ""),
                "input_params": record.get("input_params", {}),
                "story_type": record.get("story_type", ""),
                "video_url": record.get("video_url", "")  # Include video URL if present
            }
            for sid, record in all_stories.items()
            if record.get('uid') == uid  # only projects belonging to the current user
        }
        # Newest projects first
        ordered = sorted(user_stories.items(), key=lambda kv: kv[1]["created_at"], reverse=True)
        return jsonify({"projects": dict(ordered)})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# view videos endpoint
@app.route('/api/view/videos', methods=['GET'])
def view_videos():
    """
    Returns only the stories that have a 'video_url' field,
    meaning the video has been generated.
    """
    try:
        # --- Authentication ---
        auth_header = request.headers.get('Authorization', '')
        if not auth_header.startswith('Bearer '):
            return jsonify({'error': 'Missing or invalid token'}), 401
        uid = verify_token(auth_header.split(' ')[1])
        if not uid:
            return jsonify({'error': 'Invalid or expired token'}), 401
        # --- Keep only the caller's stories that already have a rendered video ---
        all_stories = db.reference('stories').get() or {}
        user_videos = {
            sid: {
                "story_id": sid,
                "full_story": record.get("full_story", ""),
                "sections": record.get("sections", []),
                "video_url": record.get("video_url", ""),
                "created_at": record.get("created_at", "")
            }
            for sid, record in all_stories.items()
            if record.get('uid') == uid and record.get('video_url')
        }
        # Newest first
        ordered = sorted(user_videos.items(), key=lambda kv: kv[1]["created_at"], reverse=True)
        return jsonify({"videos": dict(ordered)})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
#download archives endpoint
# Configure logging for the story-archive download endpoint.
# Ensure log file exists or create it
LOG_FILE_PATH = "story_download.log"
if not os.path.exists(LOG_FILE_PATH):
    with open(LOG_FILE_PATH, 'w'):
        pass # create empty file
# NOTE(review): basicConfig is a no-op once the root logger has handlers, and
# this module already called basicConfig earlier — confirm whether log output
# is actually expected to land in story_download.log.
logging.basicConfig(filename=LOG_FILE_PATH, level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
@app.route('/api/download/story_archive/<story_id>', methods=['GET'])
def download_story_archive(story_id):
    """
    Build and stream a ZIP archive of one story: a PDF rendition, a plain-text
    rendition, and the raw per-section image/audio files from Firebase Storage.

    Auth: Bearer token; the story must belong to the caller.
    Returns the ZIP as an attachment, or a JSON error with 4xx/5xx.
    """
    try:
        logging.info(f"πΉ [START] Processing story {story_id}")
        # --- Authentication ---
        auth_header = request.headers.get('Authorization', '')
        if not auth_header.startswith('Bearer '):
            logging.warning("β Missing or invalid token")
            return jsonify({'error': 'Missing or invalid token'}), 401
        token = auth_header.split(' ')[1]
        uid = verify_token(token)
        if not uid:
            logging.warning("β Invalid or expired token")
            return jsonify({'error': 'Invalid or expired token'}), 401
        # --- Fetch the Story ---
        stories_ref = db.reference('stories')
        story_record = stories_ref.child(story_id).get()
        if not story_record:
            logging.error(f"β Story {story_id} not found")
            return jsonify({'error': 'Story not found'}), 404
        if story_record.get('uid') != uid:
            logging.warning(f"β Unauthorized access attempt for story {story_id}")
            return jsonify({'error': 'Unauthorized'}), 403
        full_text = story_record.get("full_story", "")
        sections = story_record.get("sections", [])
        # Title = first sentence of the story (text before the first period)
        split_sentences = full_text.split('.', 1)
        title = split_sentences[0].strip() if split_sentences and split_sentences[0].strip() else "Untitled"
        logging.info(f"π Story title: {title}")
        # Create ZIP buffer (assembled fully in memory)
        zip_buffer = io.BytesIO()
        with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zipf:
            # =========== 1) Generate PDF ============
            logging.info(f"π Generating PDF for {story_id}")
            pdf = FPDF()
            pdf.set_auto_page_break(auto=True, margin=15)
            # --- Add and use DejaVu fonts for Unicode support ---
            pdf.add_font("DejaVu", "", "DejaVuSans.ttf", uni=True)
            pdf.add_font("DejaVu", "B", "dejavu-sans-bold.ttf", uni=True)
            pdf.add_page()
            # Use our newly added DejaVu font
            pdf.set_font("DejaVu", size=12)
            # Keep text within a safe width (190) so it doesn't go off-screen
            max_width = 190
            pdf.multi_cell(max_width, 10, f"Story ID: {story_id}")
            pdf.multi_cell(max_width, 10, f"Title: {title}")
            pdf.ln(10) # spacing before sections
            bucket = storage.bucket()
            # Add sections in PDF
            for idx, section_obj in enumerate(sections):
                section_text = section_obj.get("section_text", "")
                image_url = section_obj.get("image_url", "")
                pdf.add_page()
                # Use bold font for section headers
                pdf.set_font("DejaVu", "B", 14)
                pdf.multi_cell(max_width, 10, f"Section {idx + 1}")
                pdf.ln(5)
                pdf.set_font("DejaVu", size=12)
                pdf.multi_cell(max_width, 10, section_text)
                pdf.ln(10)
                if image_url:
                    try:
                        logging.info(f"π· Downloading image for section {idx + 1}: {image_url}")
                        file_path = extract_firebase_path(image_url)
                        if not file_path:
                            logging.error(f"β Could not parse image URL => {image_url}")
                            pdf.multi_cell(max_width, 10, "[Image URL invalid]")
                            continue
                        # delete=False so the file survives the with-block; we
                        # remove it ourselves after embedding it in the PDF.
                        with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
                            temp_img_path = temp_file.name
                        blob = bucket.blob(file_path)
                        blob.download_to_filename(temp_img_path)
                        # Insert the image, using full page width minus margins
                        # NOTE(review): the fixed h=100 may distort aspect ratio — confirm intended.
                        pdf.image(temp_img_path, x=10, w=pdf.w - 20, h=100)
                        os.unlink(temp_img_path)
                        logging.info(f"β Image embedded in PDF for section {idx + 1}")
                    except Exception as img_error:
                        logging.error(f"β Error embedding image for section {idx + 1}: {str(img_error)}")
                        pdf.multi_cell(max_width, 10, "[Image could not be included]")
            # Save PDF to memory
            pdf_buffer = io.BytesIO()
            pdf.output(pdf_buffer)
            pdf_buffer.seek(0)
            zipf.writestr(f"{story_id}.pdf", pdf_buffer.read())
            logging.info(f"β PDF added to ZIP for {story_id}")
            # =========== 2) Generate TXT ============
            logging.info(f"π Generating TXT for {story_id}")
            txt_content = f"Story ID: {story_id}\nTitle: {title}\n\nFull Story:\n\n{full_text}\n\n"
            for idx, section_obj in enumerate(sections):
                section_text = section_obj.get("section_text", "")
                txt_content += f"\n\nSection {idx + 1}:\n{section_text}"
            zipf.writestr(f"{story_id}.txt", txt_content)
            logging.info(f"β TXT added to ZIP for {story_id}")
            # =========== 3) Images to ZIP ============
            for idx, section_obj in enumerate(sections):
                image_url = section_obj.get("image_url", "")
                if image_url:
                    try:
                        logging.info(f"π· Downloading image for ZIP (section {idx + 1}): {image_url}")
                        file_path = extract_firebase_path(image_url)
                        if not file_path:
                            logging.error(f"β Could not parse image URL => {image_url}")
                            continue
                        blob = bucket.blob(file_path)
                        image_data = blob.download_as_bytes()
                        zipf.writestr(f"image_section_{idx + 1}.jpg", image_data)
                        logging.info(f"β Image added to ZIP for section {idx + 1}")
                    except Exception as img_error:
                        logging.error(f"β Error downloading image for section {idx + 1}: {str(img_error)}")
            # =========== 4) Audio to ZIP ============
            for idx, section_obj in enumerate(sections):
                audio_url = section_obj.get("audio_url", "")
                if audio_url:
                    try:
                        logging.info(f"π Downloading audio for ZIP (section {idx + 1}): {audio_url}")
                        file_path = extract_firebase_path(audio_url)
                        if not file_path:
                            logging.error(f"β Could not parse audio URL => {audio_url}")
                            continue
                        blob = bucket.blob(file_path)
                        audio_data = blob.download_as_bytes()
                        zipf.writestr(f"audio_section_{idx + 1}.mp3", audio_data)
                        logging.info(f"β Audio added to ZIP for section {idx + 1}")
                    except Exception as audio_error:
                        logging.error(f"β Error downloading audio for section {idx + 1}: {str(audio_error)}")
        # Upload log to Firebase
        log_url = upload_log()
        if log_url:
            logging.info(f"π€ Log uploaded: {log_url}")
        else:
            logging.error("β Failed to upload log")
        # Serve ZIP File
        zip_buffer.seek(0)
        return send_file(
            zip_buffer,
            mimetype='application/zip',
            as_attachment=True,
            download_name=f"story_{story_id}.zip"
        )
    except Exception as e:
        logging.error(f"β ERROR: {str(e)}")
        traceback.print_exc()
        return jsonify({'error': str(e)}), 500
def extract_firebase_path(public_url: str) -> str:
    """
    Attempt to parse a Firebase/Google Storage URL, returning just the
    bucket file path (e.g. "stories/ABC123/file.jpg") or "" if it fails.

    Two URL shapes are supported:
    1. Classic download URLs that embed the object path between "/o/" and
       the query string.
    2. Domain-style links such as
       https://storage.googleapis.com/<bucket>/<object-path>
    """
    # Shape 1: token-style URL — object path sits between "/o/" and "?"
    if "/o/" in public_url and "?" in public_url:
        after_marker = public_url.split('/o/', 1)[1]
        encoded_path = after_marker.split('?', 1)[0]
        return urllib.parse.unquote(encoded_path)
    # Shape 2: domain-style URL — strip scheme, host, and bucket segment
    try:
        pieces = public_url.split('/', 3)  # [scheme:, '', host, remainder]
        if len(pieces) < 4:
            return ""
        # remainder looks like "<bucket>/<object path>"; drop the bucket part
        bucket_and_object = pieces[3].split('/', 1)
        if len(bucket_and_object) < 2:
            return ""
        return urllib.parse.unquote(bucket_and_object[1])
    except Exception as e:
        logging.error(f"β extract_firebase_path error: {str(e)}")
        return ""
# Delete endpoints
#projects
@app.route('/api/projects/<string:story_id>', methods=['DELETE'])
def delete_project(story_id):
    """
    Deletes the entire project (story) from the database (and optionally its assets in storage).
    """
    try:
        # --- Authentication ---
        auth_header = request.headers.get('Authorization', '')
        if not auth_header or not auth_header.startswith('Bearer '):
            return jsonify({'error': 'Missing or invalid token'}), 401
        uid = verify_token(auth_header.split(' ')[1])
        if not uid:
            return jsonify({'error': 'Invalid or expired token'}), 401
        # --- Fetch the story and verify ownership ---
        story_ref = db.reference(f"stories/{story_id}")
        record = story_ref.get()
        if not record:
            return jsonify({'error': 'Story not found'}), 404
        if record.get('uid') != uid:
            return jsonify({'error': 'Unauthorized'}), 403
        # --- Remove the story node from the database ---
        story_ref.delete()
        return jsonify({'success': True, 'message': f'Story {story_id} deleted.'}), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
#videos
@app.route('/api/videos/<string:story_id>', methods=['DELETE'])
def delete_video(story_id):
    """
    Deletes only the video from a project (removes video_url from DB),
    optionally deleting the file in storage.

    Auth: Bearer token; the story must belong to the caller.
    The storage deletion is best-effort: a missing/undeletable blob no
    longer blocks clearing the stale video_url from the database.
    """
    try:
        # --- Authentication ---
        auth_header = request.headers.get('Authorization', '')
        if not auth_header or not auth_header.startswith('Bearer '):
            return jsonify({'error': 'Missing or invalid token'}), 401
        token = auth_header.split(' ')[1]
        uid = verify_token(token)
        if not uid:
            return jsonify({'error': 'Invalid or expired token'}), 401
        # --- Fetch Story ---
        story_ref = db.reference(f"stories/{story_id}")
        story_data = story_ref.get()
        if not story_data:
            return jsonify({'error': 'Story not found'}), 404
        if story_data.get('uid') != uid:
            return jsonify({'error': 'Unauthorized'}), 403
        video_url = story_data.get('video_url')
        if not video_url:
            return jsonify({'error': 'No video to delete'}), 400
        # Delete the video file from Firebase Storage (best effort).
        # FIX: a storage failure (e.g. the blob was already removed) used to
        # abort the whole request with a 500 and leave a stale video_url in
        # the DB; now we log it and still clear the record.
        file_path = extract_firebase_path(video_url)
        if file_path:
            try:
                storage.bucket().blob(file_path).delete()
            except Exception as storage_err:
                logging.warning(f"Could not delete video blob '{file_path}': {storage_err}")
        # --- Remove video_url from DB ---
        story_ref.update({'video_url': None})
        return jsonify({'success': True, 'message': f'Video removed from story {story_id}.'}), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# ---------- Credit Request Endpoints ----------
@app.route('/api/user/request-credits', methods=['POST'])
def request_credits():
    """
    Let an authenticated user file a credit request for admin review.

    Body: {"requested_credits": <number>}. Creates a pending entry under
    `credit_requests` and returns its push key.
    """
    try:
        # Bearer-token authentication.
        auth_header = request.headers.get('Authorization', '')
        if not auth_header.startswith('Bearer '):
            return jsonify({'error': 'Authorization header missing or malformed'}), 401
        token = auth_header.split(' ')[1]
        uid = verify_token(token)
        if not uid:
            return jsonify({'error': 'Invalid token'}), 401

        # silent=True: malformed/absent JSON yields None instead of raising,
        # so we can return a clean 400 rather than a 500.
        data = request.get_json(silent=True) or {}
        requested_credits = data.get('requested_credits')
        if requested_credits is None:
            return jsonify({'error': 'requested_credits is required'}), 400
        # Validate numerically up front — the admin approval path calls
        # float() on this value, so a bad value would otherwise fail later.
        try:
            requested_credits = float(requested_credits)
        except (TypeError, ValueError):
            return jsonify({'error': 'requested_credits must be a number'}), 400

        # Record the pending request for admins to approve/decline.
        credit_request_ref = db.reference('credit_requests').push()
        credit_request_ref.set({
            'user_id': uid,
            'requested_credits': requested_credits,
            'status': 'pending',
            'requested_at': datetime.utcnow().isoformat()
        })
        return jsonify({'success': True, 'request_id': credit_request_ref.key})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# ---------- Admin Endpoints for Credit Requests ----------
# admin profile
@app.route('/api/admin/profile', methods=['GET'])
def get_admin_profile():
    """
    Return the admin's own profile plus aggregated platform statistics
    (user counts and credit totals). Admin-only.
    """
    # Presumably every new normal user starts with this many credits —
    # TODO confirm against the signup flow.
    INITIAL_CREDITS_PER_USER = 3
    try:
        auth_header = request.headers.get('Authorization', '')
        # Use logging (consistent with the rest of the file) instead of print.
        logging.debug(f"Received Auth Header (admin): {auth_header}")

        # verify_admin must confirm the caller is an admin.
        admin_uid = verify_admin(auth_header)
        if not admin_uid:
            return jsonify({'error': 'Unauthorized: Admin access required'}), 401

        # The admin's own profile record.
        admin_data = db.reference(f'users/{admin_uid}').get()
        logging.debug(f"Fetched Admin Data: {admin_data}")
        if not admin_data:
            return jsonify({'error': 'Admin user not found'}), 404

        # Aggregate statistics over all users.
        all_users_data = db.reference('users').get() or {}
        total_users = len(all_users_data)

        # Normal (non-admin) users drive the usage statistics.
        normal_users_data = [user for user in all_users_data.values() if not user.get('is_admin', False)]
        total_normal_users = len(normal_users_data)

        total_current_credits = sum(user.get('credits', 0) for user in all_users_data.values())
        total_normal_current_credits = sum(user.get('credits', 0) for user in normal_users_data)

        # Usage = credits initially granted minus credits still held.
        total_initial_credits = total_normal_users * INITIAL_CREDITS_PER_USER
        credit_usage = total_initial_credits - total_normal_current_credits

        return jsonify({
            'uid': admin_uid,
            'email': admin_data.get('email'),
            'credits': admin_data.get('credits', 0),
            'is_admin': True,
            'aggregated_stats': {
                'total_users': total_users,
                'total_normal_users': total_normal_users,
                'total_current_credits': total_current_credits,
                'total_normal_current_credits': total_normal_current_credits,
                'total_initial_credits_normal_users': total_initial_credits,
                'credit_usage': credit_usage
            }
        })
    except Exception as e:
        logging.error(f"Error fetching admin profile: {str(e)}")
        return jsonify({'error': str(e)}), 500
@app.route('/api/admin/credit_requests', methods=['GET'])
def list_credit_requests():
    """
    List all credit requests with their push keys as `id`. Admin-only.
    """
    try:
        # Fix: the verify_admin result was previously ignored, so a
        # non-admin caller was never rejected (sibling endpoints check it).
        admin_uid = verify_admin(request.headers.get('Authorization', ''))
        if not admin_uid:
            return jsonify({'error': 'Unauthorized: Admin access required'}), 401

        credit_requests = db.reference('credit_requests').get() or {}
        # Flatten {key: data} into [{id, ...data}] for the client.
        requests_list = [{'id': req_id, **data} for req_id, data in credit_requests.items()]
        return jsonify({'credit_requests': requests_list})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/admin/credit_requests/<string:request_id>', methods=['PUT'])
def process_credit_request(request_id):
    """
    Approve or decline a credit request. Admin-only.

    Body: {"decision": "approved" | "declined"}. Approval adds the
    requested credits to the user's balance; both outcomes stamp the
    request with the processing admin and timestamp.
    """
    try:
        # Fix: previously the verify_admin result was assigned but never
        # checked, so non-admin callers could process requests.
        admin_uid = verify_admin(request.headers.get('Authorization', ''))
        if not admin_uid:
            return jsonify({'error': 'Unauthorized: Admin access required'}), 401

        req_ref = db.reference(f'credit_requests/{request_id}')
        req_data = req_ref.get()
        if not req_data:
            return jsonify({'error': 'Credit request not found'}), 404

        # silent=True: malformed JSON gives a 400 instead of a 500.
        data = request.get_json(silent=True) or {}
        decision = data.get('decision')
        if decision not in ['approved', 'declined']:
            return jsonify({'error': 'decision must be "approved" or "declined"'}), 400

        if decision == 'approved':
            # Credit the requesting user's balance.
            user_ref = db.reference(f'users/{req_data["user_id"]}')
            user_data = user_ref.get()
            if not user_data:
                return jsonify({'error': 'User not found'}), 404
            new_total = user_data.get('credits', 0) + float(req_data.get('requested_credits', 0))
            user_ref.update({'credits': new_total})
            req_ref.update({
                'status': 'approved',
                'processed_by': admin_uid,
                'processed_at': datetime.utcnow().isoformat()
            })
            return jsonify({'success': True, 'new_user_credits': new_total})

        # Declined: record the decision without touching the balance.
        req_ref.update({
            'status': 'declined',
            'processed_by': admin_uid,
            'processed_at': datetime.utcnow().isoformat()
        })
        return jsonify({'success': True, 'message': 'Credit request declined'})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/admin/users', methods=['GET'])
def admin_list_users():
    """
    List every user with a normalized subset of fields. Admin-only.
    """
    try:
        # Fix: the verify_admin result was previously ignored, so the
        # admin check never actually rejected anyone.
        admin_uid = verify_admin(request.headers.get('Authorization', ''))
        if not admin_uid:
            return jsonify({'error': 'Unauthorized: Admin access required'}), 401

        all_users = db.reference('users').get() or {}
        # Flatten {uid: data} into a list of {uid, ...selected fields}.
        user_list = [
            {
                'uid': uid,
                'email': user_data.get('email'),
                'credits': user_data.get('credits', 0),
                'is_admin': user_data.get('is_admin', False),
                'created_at': user_data.get('created_at', ''),
                'suspended': user_data.get('suspended', False)
            }
            for uid, user_data in all_users.items()
        ]
        return jsonify({'users': user_list}), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/admin/users/search', methods=['GET'])
def admin_search_users():
    """
    Search users by (case-insensitive, partial) email match. Admin-only.

    Query param: ?email=<fragment>. Returns 400 if the param is missing.
    """
    try:
        # Fix: the verify_admin result was previously ignored, so the
        # admin check never actually rejected anyone.
        admin_uid = verify_admin(request.headers.get('Authorization', ''))
        if not admin_uid:
            return jsonify({'error': 'Unauthorized: Admin access required'}), 401

        email_query = request.args.get('email', '').lower().strip()
        if not email_query:
            return jsonify({'error': 'email query param is required'}), 400

        all_users = db.reference('users').get() or {}
        matched_users = []
        for uid, user_data in all_users.items():
            # Partial, case-insensitive match on the stored email.
            user_email = user_data.get('email', '').lower()
            if email_query in user_email:
                matched_users.append({
                    'uid': uid,
                    'email': user_data.get('email'),
                    'credits': user_data.get('credits', 0),
                    'is_admin': user_data.get('is_admin', False),
                    'created_at': user_data.get('created_at', ''),
                    'suspended': user_data.get('suspended', False)
                })
        return jsonify({'matched_users': matched_users}), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/admin/users/<string:uid>/suspend', methods=['PUT'])
def admin_suspend_user(uid):
    """
    Suspend or unsuspend a user account. Admin-only.

    Body: {"action": "suspend" | "unsuspend"}. Sets the user's
    `suspended` flag accordingly.
    """
    try:
        # Fix: the verify_admin result was previously ignored, so
        # non-admin callers could suspend accounts.
        admin_uid = verify_admin(request.headers.get('Authorization', ''))
        if not admin_uid:
            return jsonify({'error': 'Unauthorized: Admin access required'}), 401

        # silent=True: malformed JSON gives a 400 instead of a 500.
        data = request.get_json(silent=True) or {}
        action = data.get('action')
        if action not in ["suspend", "unsuspend"]:
            return jsonify({'error': 'action must be "suspend" or "unsuspend"'}), 400

        user_ref = db.reference(f'users/{uid}')
        user_data = user_ref.get()
        if not user_data:
            return jsonify({'error': 'User not found'}), 404

        user_ref.update({'suspended': action == "suspend"})
        return jsonify({'success': True, 'message': f'User {uid} is now {action}ed'})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/admin/stories', methods=['GET'])
def admin_list_stories():
    """
    Return the total story count and a per-user (keyed by email) story
    count. Admin-only.
    """
    try:
        # Fix: the verify_admin result was previously ignored, so the
        # admin check never actually rejected anyone.
        admin_uid = verify_admin(request.headers.get('Authorization', ''))
        if not admin_uid:
            return jsonify({'error': 'Unauthorized: Admin access required'}), 401

        all_stories = db.reference('stories').get() or {}
        total_stories = len(all_stories)

        # Map owner UIDs to emails so the report is human-readable.
        users_data = db.reference('users').get() or {}
        stories_per_user = {}
        for sid, sdata in all_stories.items():
            user_id = sdata.get('uid')
            if user_id:
                user_email = users_data.get(user_id, {}).get('email', 'Unknown')
                stories_per_user[user_email] = stories_per_user.get(user_email, 0) + 1

        return jsonify({
            'total_stories': total_stories,
            'stories_per_user': stories_per_user  # keyed by email, not UID
        }), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/admin/notifications', methods=['POST'])
def send_notifications():
    """
    Send a notification message to one user, a list of users, or all
    users. Admin-only.

    Body: {"message": str, "recipients": "all" | user_id | [user_id, ...]}
    (recipients defaults to "all"). Each target gets an entry under
    notifications/{user_id}/{uuid}.
    """
    try:
        # Admin gate.
        admin_uid = verify_admin(request.headers.get('Authorization', ''))
        if not admin_uid:
            return jsonify({'error': 'Unauthorized: Admin access required'}), 401

        payload = request.get_json()
        message = payload.get('message')
        if not message:
            return jsonify({'error': 'message is required'}), 400
        recipients = payload.get('recipients', "all")

        # Resolve the recipient spec against the registered user set.
        registered_users = db.reference('users').get() or {}
        if recipients == "all":
            targets = list(registered_users.keys())
        elif isinstance(recipients, list):
            # Silently drop IDs that don't correspond to real users.
            targets = [user_id for user_id in recipients if user_id in registered_users]
        elif isinstance(recipients, str):
            if recipients not in registered_users:
                return jsonify({'error': 'Invalid single user_id'}), 400
            targets = [recipients]
        else:
            return jsonify({'error': 'recipients must be "all", a user_id, or a list of user_ids'}), 400

        # One notification record per target, all sharing a timestamp.
        created_at = datetime.utcnow().isoformat()
        for target in targets:
            notif_id = str(uuid.uuid4())
            db.reference(f'notifications/{target}/{notif_id}').set({
                "from_admin": admin_uid,
                "message": message,
                "created_at": created_at,
                "read": False
            })

        return jsonify({
            'success': True,
            'message': f"Notification sent to {len(targets)} user(s)."
        }), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/admin/feedback', methods=['GET'])
def admin_view_feedback():
    """
    List feedback entries, optionally filtered by ?type= and/or ?status=.
    Admin-only.
    """
    try:
        # Admin gate.
        admin_uid = verify_admin(request.headers.get('Authorization', ''))
        if not admin_uid:
            return jsonify({'error': 'Unauthorized: Admin access required'}), 401

        # Optional filters (e.g. type=bug, status=open); None means "any".
        wanted_type = request.args.get('type')
        wanted_status = request.args.get('status')

        all_feedback = db.reference('feedback').get() or {}

        feedback_list = []
        for fb_id, fb_data in all_feedback.items():
            # Skip entries that fail an active filter.
            if wanted_type and fb_data.get('type') != wanted_type:
                continue
            if wanted_status and fb_data.get('status') != wanted_status:
                continue
            feedback_list.append({
                'feedback_id': fb_id,
                'user_id': fb_data.get('user_id'),
                'user_email': fb_data.get('user_email'),
                'type': fb_data.get('type', 'general'),
                'message': fb_data.get('message', ''),
                'created_at': fb_data.get('created_at'),
                'status': fb_data.get('status', 'open')
            })

        return jsonify({'feedback': feedback_list}), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# ---------- Admin Endpoint to Directly Update Credits ----------
@app.route('/api/admin/users/<string:uid>/credits', methods=['PUT'])
def admin_update_credits(uid):
    """
    Add (or, with a negative value, subtract) credits on a user's
    balance directly. Admin-only.

    Body: {"add_credits": <number>}. Returns the user's new total.
    """
    try:
        # Fix: the verify_admin result was previously ignored, so
        # non-admin callers could change balances.
        admin_uid = verify_admin(request.headers.get('Authorization', ''))
        if not admin_uid:
            return jsonify({'error': 'Unauthorized: Admin access required'}), 401

        # silent=True: malformed JSON gives a 400 instead of a 500.
        data = request.get_json(silent=True) or {}
        add_credits = data.get('add_credits')
        if add_credits is None:
            return jsonify({'error': 'add_credits is required'}), 400
        # Validate numerically so bad input yields 400, not a 500.
        try:
            add_credits = float(add_credits)
        except (TypeError, ValueError):
            return jsonify({'error': 'add_credits must be a number'}), 400

        user_ref = db.reference(f'users/{uid}')
        user_data = user_ref.get()
        if not user_data:
            return jsonify({'error': 'User not found'}), 404

        new_total = user_data.get('credits', 0) + add_credits
        user_ref.update({'credits': new_total})
        return jsonify({'success': True, 'new_total_credits': new_total})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# ---------- Main ----------
if __name__ == '__main__':
    # Create dummy admin account if it doesn't exist
    #create_dummy_admin()
    # NOTE(review): debug=True enables Werkzeug's interactive debugger and
    # auto-reloader — confirm this is intentional for the deployment target;
    # it should be off in production. Binds all interfaces on port 7860.
    app.run(debug=True, host="0.0.0.0", port=7860)