#!/usr/bin/env python3
"""
GAIA Agent Production Interface
Production-ready Gradio app for the GAIA benchmark agent system with Unit 4 API integration
"""
import os
import gradio as gr
import logging
import time
import requests
import pandas as pd
from typing import Optional, Tuple, Dict, Any
import tempfile
from pathlib import Path
import json
from datetime import datetime
import csv
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Import our workflow
from workflow.gaia_workflow import SimpleGAIAWorkflow, create_gaia_workflow
from models.qwen_client import QwenClient
from agents.state import GAIAAgentState
# Constants for Unit 4 API
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
class GAIAResultLogger:
"""
Logger for GAIA evaluation results with export functionality
"""
def __init__(self):
self.results_dir = Path("results")
self.results_dir.mkdir(exist_ok=True)
def log_evaluation_results(self, username: str, questions_data: list, results_log: list,
final_result: dict, execution_time: float) -> dict:
"""
Log complete evaluation results to multiple formats
Returns paths to generated files
"""
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
base_filename = f"gaia_evaluation_{username}_{timestamp}"
files_created = {}
try:
# 1. CSV Export (for easy sharing)
csv_path = self.results_dir / f"{base_filename}.csv"
self._save_csv_results(csv_path, results_log, final_result)
files_created["csv"] = str(csv_path)
# 2. Detailed JSON Export
json_path = self.results_dir / f"{base_filename}.json"
detailed_results = self._create_detailed_results(
username, questions_data, results_log, final_result, execution_time, timestamp
)
self._save_json_results(json_path, detailed_results)
files_created["json"] = str(json_path)
# 3. Summary Report
summary_path = self.results_dir / f"{base_filename}_summary.md"
self._save_summary_report(summary_path, detailed_results)
files_created["summary"] = str(summary_path)
logger.info(f"β
Results logged to {len(files_created)} files: {list(files_created.keys())}")
except Exception as e:
logger.error(f"β Error logging results: {e}")
files_created["error"] = str(e)
return files_created
def _save_csv_results(self, path: Path, results_log: list, final_result: dict):
"""Save results in CSV format for easy sharing"""
with open(path, 'w', newline='', encoding='utf-8') as csvfile:
if not results_log:
return
fieldnames = list(results_log[0].keys()) + ['Correct', 'Score']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# Header
writer.writeheader()
# Add overall results info
score = final_result.get('score', 'N/A')
correct_count = final_result.get('correct_count', 'N/A')
total_attempted = final_result.get('total_attempted', len(results_log))
# Write each result
for i, row in enumerate(results_log):
row_data = row.copy()
row_data['Correct'] = 'Unknown' # We don't get individual correct/incorrect from API
row_data['Score'] = f"{score}% ({correct_count}/{total_attempted})" if i == 0 else ""
writer.writerow(row_data)
def _create_detailed_results(self, username: str, questions_data: list, results_log: list,
final_result: dict, execution_time: float, timestamp: str) -> dict:
"""Create comprehensive results dictionary"""
return {
"metadata": {
"username": username,
"timestamp": timestamp,
"execution_time_seconds": execution_time,
"total_questions": len(questions_data),
"total_processed": len(results_log),
"system_info": {
"gradio_version": "4.44.0",
"python_version": "3.x",
"space_id": os.getenv("SPACE_ID", "local"),
"space_host": os.getenv("SPACE_HOST", "local")
}
},
"evaluation_results": {
"overall_score": final_result.get('score', 'N/A'),
"correct_count": final_result.get('correct_count', 'N/A'),
"total_attempted": final_result.get('total_attempted', len(results_log)),
"success_rate": f"{final_result.get('score', 0)}%",
"api_message": final_result.get('message', 'No message'),
"submission_successful": 'score' in final_result
},
"question_details": [
{
"index": i + 1,
"task_id": item.get("task_id"),
"question": item.get("question"),
"level": item.get("Level", "Unknown"),
"file_name": item.get("file_name", ""),
"submitted_answer": next(
(r["Submitted Answer"] for r in results_log if r.get("Task ID") == item.get("task_id")),
"No answer"
),
"question_length": len(item.get("question", "")),
"answer_length": len(next(
(r["Submitted Answer"] for r in results_log if r.get("Task ID") == item.get("task_id")),
""
))
}
for i, item in enumerate(questions_data)
],
"processing_summary": {
"questions_by_level": self._analyze_questions_by_level(questions_data),
"questions_with_files": len([q for q in questions_data if q.get("file_name")]),
"average_question_length": sum(len(q.get("question", "")) for q in questions_data) / len(questions_data) if questions_data else 0,
"average_answer_length": sum(len(r.get("Submitted Answer", "")) for r in results_log) / len(results_log) if results_log else 0,
"processing_time_per_question": execution_time / len(results_log) if results_log else 0
},
"raw_results_log": results_log,
"api_response": final_result
}
def _analyze_questions_by_level(self, questions_data: list) -> dict:
"""Analyze question distribution by level"""
level_counts = {}
for q in questions_data:
level = q.get("Level", "Unknown")
level_counts[level] = level_counts.get(level, 0) + 1
return level_counts
def _save_json_results(self, path: Path, detailed_results: dict):
"""Save detailed results in JSON format"""
with open(path, 'w', encoding='utf-8') as jsonfile:
json.dump(detailed_results, jsonfile, indent=2, ensure_ascii=False)
def _save_summary_report(self, path: Path, detailed_results: dict):
"""Save human-readable summary report"""
metadata = detailed_results["metadata"]
results = detailed_results["evaluation_results"]
summary = detailed_results["processing_summary"]
report = f"""# GAIA Agent Evaluation Report
## Summary
- **User**: {metadata['username']}
- **Date**: {metadata['timestamp']}
- **Overall Score**: {results['overall_score']}% ({results['correct_count']}/{results['total_attempted']} correct)
- **Execution Time**: {metadata['execution_time_seconds']:.2f} seconds
- **Submission Status**: {'✅ Success' if results['submission_successful'] else '❌ Failed'}
## Question Analysis
- **Total Questions**: {metadata['total_questions']}
- **Successfully Processed**: {metadata['total_processed']}
- **Questions with Files**: {summary['questions_with_files']}
- **Average Question Length**: {summary['average_question_length']:.0f} characters
- **Average Answer Length**: {summary['average_answer_length']:.0f} characters
- **Processing Time per Question**: {summary['processing_time_per_question']:.2f} seconds
## Questions by Level
"""
for level, count in summary['questions_by_level'].items():
report += f"- **Level {level}**: {count} questions\n"
report += f"""
## API Response
{results['api_message']}
## System Information
- **Space ID**: {metadata['system_info']['space_id']}
- **Space Host**: {metadata['system_info']['space_host']}
- **Gradio Version**: {metadata['system_info']['gradio_version']}
---
*Report generated automatically by GAIA Agent System*
"""
with open(path, 'w', encoding='utf-8') as f:
f.write(report)
def get_latest_results(self, username: Optional[str] = None) -> list:
"""Get list of latest result files"""
pattern = f"gaia_evaluation_{username}_*" if username else "gaia_evaluation_*"
files = list(self.results_dir.glob(pattern))
files.sort(key=lambda x: x.stat().st_mtime, reverse=True)
return files[:10] # Return 10 most recent
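# Illustrative usage sketch for GAIAResultLogger (the data shown here is hypothetical;
# in production the values come from run_and_submit_all below):
#
#   result_logger = GAIAResultLogger()
#   files = result_logger.log_evaluation_results(
#       username="demo_user",
#       questions_data=[{"task_id": "t1", "question": "2 + 2?", "Level": "1"}],
#       results_log=[{"Task ID": "t1", "Question": "2 + 2?", "Submitted Answer": "4"}],
#       final_result={"score": 100, "correct_count": 1, "total_attempted": 1, "message": "ok"},
#       execution_time=1.2,
#   )
#   # files maps "csv", "json", and "summary" to the generated file paths under results/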
class GAIAAgentApp:
"""Production GAIA Agent Application with LangGraph workflow and Qwen models"""
def __init__(self, hf_token: Optional[str] = None):
"""Initialize the application with LangGraph workflow and Qwen models only"""
# Priority order: 1) passed hf_token, 2) HF_TOKEN env var
if not hf_token:
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
raise ValueError("HuggingFace token with inference permissions is required. Please set HF_TOKEN environment variable or login with full access.")
try:
# Initialize QwenClient with token
from models.qwen_client import QwenClient
self.llm_client = QwenClient(hf_token=hf_token)
# Initialize LangGraph workflow with tools
self.workflow = SimpleGAIAWorkflow(self.llm_client)
self.initialized = True
logger.info("β
GAIA Agent system initialized with LangGraph workflow and Qwen models")
except Exception as e:
logger.error(f"β Failed to initialize GAIA Agent system: {e}")
raise RuntimeError(f"System initialization failed: {e}. Please ensure HF_TOKEN has inference permissions.")
@classmethod
def create_with_oauth_token(cls, oauth_token: str) -> "GAIAAgentApp":
"""Create a new instance with OAuth token"""
if not oauth_token:
raise ValueError("Valid OAuth token is required for GAIA Agent initialization")
return cls(hf_token=oauth_token)
def __call__(self, question: str) -> str:
"""
Main agent call for Unit 4 API compatibility
"""
if not self.initialized:
return "System not initialized"
try:
result_state = self.workflow.process_question(
question=question,
task_id=f"unit4_{hash(question) % 10000}"
)
# Return the final answer for API submission
return result_state.final_answer if result_state.final_answer else "Unable to process question"
except Exception as e:
logger.error(f"Error processing question: {e}")
return f"Processing error: {str(e)}"
def process_question_detailed(self, question: str, file_input=None, show_reasoning: bool = False) -> Tuple[str, str, str]:
"""
Process a question through the GAIA agent system with detailed output
Returns:
Tuple of (answer, details, reasoning)
"""
if not self.initialized:
return "β System not initialized", "", ""
if not question.strip():
return "β Please provide a question", "", ""
start_time = time.time()
# Handle file upload
file_path = None
file_name = None
if file_input is not None:
file_path = file_input.name
file_name = os.path.basename(file_path)
try:
# Process through workflow
result_state = self.workflow.process_question(
question=question,
file_path=file_path,
file_name=file_name,
task_id=f"manual_{hash(question) % 10000}"
)
processing_time = time.time() - start_time
# Format answer
answer = result_state.final_answer
if not answer:
answer = "Unable to process question - no answer generated"
# Format details
details = self._format_details(result_state, processing_time)
# Format reasoning (if requested)
reasoning = ""
if show_reasoning:
reasoning = self._format_reasoning(result_state)
return answer, details, reasoning
except Exception as e:
error_msg = f"Processing failed: {str(e)}"
logger.error(error_msg)
return f"β {error_msg}", "Please try again or contact support", ""
def _format_details(self, state, processing_time: float) -> str:
"""Format processing details"""
details = []
# Basic info
details.append(f"π― **Question Type**: {state.question_type.value}")
details.append(f"β‘ **Processing Time**: {processing_time:.2f}s")
details.append(f"π **Confidence**: {state.final_confidence:.2f}")
details.append(f"π° **Cost**: ${state.total_cost:.4f}")
# Agents used
agents_used = [result.agent_role.value for result in state.agent_results]
details.append(f"π€ **Agents Used**: {', '.join(agents_used) if agents_used else 'None'}")
# Tools used
tools_used = []
for result in state.agent_results:
tools_used.extend(result.tools_used)
unique_tools = list(set(tools_used))
details.append(f"π§ **Tools Used**: {', '.join(unique_tools) if unique_tools else 'None'}")
# File processing
if state.file_name:
details.append(f"π **File Processed**: {state.file_name}")
# Quality indicators
if state.confidence_threshold_met:
details.append("β
**Quality**: High confidence")
elif state.final_confidence > 0.5:
details.append("β οΈ **Quality**: Medium confidence")
else:
details.append("β **Quality**: Low confidence")
# Review status
if state.requires_human_review:
details.append("ποΈ **Review**: Human review recommended")
# Error count
if state.error_messages:
details.append(f"β οΈ **Errors**: {len(state.error_messages)} encountered")
return "\n".join(details)
def _format_reasoning(self, state) -> str:
"""Format detailed reasoning and workflow steps"""
reasoning = []
# Routing decision
reasoning.append("## π§ Routing Decision")
reasoning.append(f"**Classification**: {state.question_type.value}")
reasoning.append(f"**Selected Agents**: {[a.value for a in state.selected_agents]}")
reasoning.append(f"**Reasoning**: {state.routing_decision}")
reasoning.append("")
# Agent results
reasoning.append("## π€ Agent Processing")
for i, result in enumerate(state.agent_results, 1):
reasoning.append(f"### Agent {i}: {result.agent_role.value}")
reasoning.append(f"**Success**: {'β
' if result.success else 'β'}")
reasoning.append(f"**Confidence**: {result.confidence:.2f}")
reasoning.append(f"**Tools Used**: {', '.join(result.tools_used) if result.tools_used else 'None'}")
reasoning.append(f"**Reasoning**: {result.reasoning}")
reasoning.append(f"**Result**: {result.result[:200]}...")
reasoning.append("")
# Synthesis process
reasoning.append("## π Synthesis Process")
reasoning.append(f"**Strategy**: {state.answer_source}")
reasoning.append(f"**Final Reasoning**: {state.final_reasoning}")
reasoning.append("")
# Processing timeline
reasoning.append("## β±οΈ Processing Timeline")
for i, step in enumerate(state.processing_steps, 1):
reasoning.append(f"{i}. {step}")
return "\n".join(reasoning)
def get_examples(self) -> list:
"""Get example questions for the interface that showcase multi-agent capabilities"""
return [
"How many studio albums were published by Mercedes Sosa between 2000 and 2009?",
"What is the capital of the country that has the most time zones?",
"Calculate the compound interest on $1000 at 5% annual rate compounded quarterly for 3 years",
"What is the square root of the sum of the first 10 prime numbers?",
"Who was the first person to walk on the moon and what year did it happen?",
"Compare the GDP of Japan and Germany in 2023 and tell me the difference",
]
def process_with_langgraph(self, question: str, question_id: str = None) -> Dict[str, Any]:
"""
Process question using enhanced LangGraph workflow with multi-phase planning
"""
try:
logger.info(f"π Processing question with enhanced LangGraph workflow: {question[:100]}...")
# Create enhanced state with proper initialization
state = GAIAAgentState(
question=question,
question_id=question_id,
file_name=None, # File handling would be added here if needed
file_content=None
)
# Create enhanced workflow with multi-step planning
workflow = create_gaia_workflow(self.llm_client, self.tools)
logger.info("π Starting enhanced multi-phase workflow execution")
# Execute workflow with enhanced planning and refinement
result_state = workflow.invoke(state)
# Extract enhanced results
processing_details = {
"steps": result_state.processing_steps,
"agents_used": [r.agent_role.value for r in result_state.agent_results],
"router_analysis": getattr(result_state, 'router_analysis', {}),
"agent_sequence": getattr(result_state, 'agent_sequence', []),
"total_steps": len(result_state.processing_steps),
"refinement_attempted": getattr(result_state, 'refinement_attempted', False)
}
# Calculate enhanced confidence based on multi-agent results
if result_state.agent_results:
confidences = [r.confidence for r in result_state.agent_results]
avg_confidence = sum(confidences) / len(confidences)
max_confidence = max(confidences)
# Boost confidence for multi-agent consensus
enhanced_confidence = min(0.95, (avg_confidence + max_confidence) / 2)
else:
enhanced_confidence = 0.1
return {
"answer": result_state.final_answer or "Unable to determine answer",
"confidence": enhanced_confidence,
"reasoning": result_state.synthesis_reasoning or "Multi-phase processing completed",
"cost": result_state.total_cost,
"processing_time": time.time() - result_state.start_time,
"processing_details": processing_details,
"agent_results": [
{
"agent": r.agent_role.value,
"success": r.success,
"confidence": r.confidence,
"reasoning": r.reasoning[:200] + "..." if len(r.reasoning) > 200 else r.reasoning,
"processing_time": r.processing_time,
"cost": r.cost_estimate
}
for r in result_state.agent_results
],
"errors": result_state.errors
}
except Exception as e:
error_msg = f"Enhanced LangGraph processing failed: {str(e)}"
logger.error(error_msg)
return {
"answer": "Processing failed with enhanced workflow",
"confidence": 0.0,
"reasoning": error_msg,
"cost": 0.0,
"processing_time": 0.0,
"processing_details": {"error": error_msg},
"agent_results": [],
"errors": [error_msg]
}
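# Minimal usage sketch for GAIAAgentApp (assumes a HuggingFace token with inference
# permissions is available; the question text is illustrative):
#
#   app = GAIAAgentApp(hf_token=os.getenv("HF_TOKEN"))
#   answer = app("What is the capital of France?")  # Unit 4 style call, returns a string
#   answer, details, reasoning = app.process_question_detailed(
#       "What is the capital of France?", show_reasoning=True
#   )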
def check_oauth_scopes(oauth_token: str) -> Dict[str, Any]:
"""
Check what scopes are available with the OAuth token
Returns a dictionary with scope information and capabilities
"""
if not oauth_token:
return {
"logged_in": False,
"scopes": [],
"can_inference": False,
"can_read": False,
"user_info": {},
"message": "Not logged in"
}
try:
headers = {"Authorization": f"Bearer {oauth_token}"}
# Test whoami endpoint (requires read scope)
logger.info("π Testing OAuth token with whoami endpoint...")
try:
whoami_response = requests.get("https://huggingface.co/api/whoami", headers=headers, timeout=10)
can_read = whoami_response.status_code == 200
logger.info(f"β
Whoami response: {whoami_response.status_code}")
if whoami_response.status_code == 401:
logger.warning("β οΈ OAuth token unauthorized for whoami endpoint")
elif whoami_response.status_code != 200:
logger.warning(f"β οΈ Unexpected whoami response: {whoami_response.status_code}")
except Exception as whoami_error:
logger.error(f"β Whoami test failed: {whoami_error}")
can_read = False
# Test inference capability by trying a simple model call
logger.info("π Testing OAuth token with inference endpoint...")
can_inference = False
try:
# Try a very simple inference call to test scope
inference_url = "https://api-inference.huggingface.co/models/microsoft/DialoGPT-medium"
test_payload = {"inputs": "test", "options": {"wait_for_model": False, "use_cache": True}}
inference_response = requests.post(inference_url, headers=headers, json=test_payload, timeout=15)
# 200 = success, 503 = model loading (but scope works), 401/403 = no scope
can_inference = inference_response.status_code in [200, 503]
logger.info(f"β
Inference response: {inference_response.status_code}")
if inference_response.status_code == 401:
logger.warning("β οΈ OAuth token unauthorized for inference endpoint - likely missing 'inference' scope")
elif inference_response.status_code == 403:
logger.warning("β οΈ OAuth token forbidden for inference endpoint - insufficient permissions")
elif inference_response.status_code not in [200, 503]:
logger.warning(f"β οΈ Unexpected inference response: {inference_response.status_code}")
except Exception as inference_error:
logger.error(f"β Inference test failed: {inference_error}")
can_inference = False
# Alternative inference test - try Qwen model directly
if not can_inference:
logger.info("π Testing OAuth token with Qwen model directly...")
try:
qwen_url = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-7B-Instruct"
qwen_payload = {"inputs": "Hello", "options": {"wait_for_model": False}}
qwen_response = requests.post(qwen_url, headers=headers, json=qwen_payload, timeout=15)
qwen_inference = qwen_response.status_code in [200, 503]
if qwen_inference:
can_inference = True
logger.info(f"β
Qwen model response: {qwen_response.status_code}")
else:
logger.warning(f"β οΈ Qwen model response: {qwen_response.status_code}")
except Exception as qwen_error:
logger.error(f"β Qwen model test failed: {qwen_error}")
# Determine probable scopes based on capabilities
probable_scopes = []
if can_read:
probable_scopes.append("read")
if can_inference:
probable_scopes.append("inference")
logger.info(f"π Final scope assessment: {probable_scopes}")
# Get user info if available
user_info = {}
if can_read and whoami_response.status_code == 200:
try:
user_data = whoami_response.json()
user_info = {
"name": user_data.get("name", "Unknown"),
"fullname": user_data.get("fullName", ""),
"avatar": user_data.get("avatarUrl", "")
}
logger.info(f"β
User info retrieved: {user_info.get('name', 'unknown')}")
except Exception as user_error:
logger.warning(f"β οΈ Could not parse user info: {user_error}")
user_info = {}
return {
"logged_in": True,
"scopes": probable_scopes,
"can_inference": can_inference,
"can_read": can_read,
"user_info": user_info,
"message": f"Logged in with scopes: {', '.join(probable_scopes) if probable_scopes else 'limited'}"
}
except Exception as e:
logger.error(f"β OAuth scope check failed: {e}")
return {
"logged_in": True,
"scopes": ["unknown"],
"can_inference": False,
"can_read": False,
"user_info": {},
"message": f"Could not determine scopes: {str(e)}"
}
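# Shape of the dictionary returned by check_oauth_scopes (example values are
# illustrative; actual scopes depend on the OAuth session):
#
#   {
#       "logged_in": True,
#       "scopes": ["read", "inference"],
#       "can_inference": True,
#       "can_read": True,
#       "user_info": {"name": "demo_user", "fullname": "Demo User", "avatar": ""},
#       "message": "Logged in with scopes: read, inference"
#   }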
def format_auth_status(profile: gr.OAuthProfile | None) -> str:
"""Format authentication status for display in UI"""
# Check for HF_TOKEN first (best performance)
hf_token = os.getenv("HF_TOKEN")
if hf_token:
# HF_TOKEN is available - this is the best case scenario
return """
### Authentication Status: HF_TOKEN Environment Variable
**FULL SYSTEM CAPABILITIES ENABLED**
**Authentication Source**: HF_TOKEN environment variable
**Model Access**: Qwen 2.5 models (7B/32B/72B) via HuggingFace Inference API
**Workflow**: LangGraph multi-agent system with specialized tools
**Available Features:**
- ✅ **Advanced Model Access**: Full Qwen model capabilities (7B/32B/72B)
- ✅ **High Performance**: 30%+ expected GAIA score
- ✅ **LangGraph Workflow**: Multi-agent orchestration with synthesis
- ✅ **Specialized Agents**: Web research, file processing, mathematical reasoning
- ✅ **Professional Tools**: Wikipedia, web search, calculator, file processor
- ✅ **Manual Testing**: Individual question processing with detailed analysis
- ✅ **Official Evaluation**: GAIA benchmark submission
**Status**: Optimal configuration for GAIA benchmark performance with real AI agents.
"""
# Check HuggingFace Spaces OAuth configuration
oauth_scopes = os.getenv("OAUTH_SCOPES")
oauth_client_id = os.getenv("OAUTH_CLIENT_ID")
# Accept both 'inference-api' and 'inference' as valid inference scopes
has_inference_scope = oauth_scopes and ("inference-api" in oauth_scopes or "inference" in oauth_scopes)
if not profile:
oauth_status = ""
if oauth_client_id:
if has_inference_scope:
oauth_status = "**π OAuth Configuration**: β
Space configured with inference scope"
else:
oauth_status = "**β οΈ OAuth Configuration**: Space OAuth enabled but missing inference scope"
else:
oauth_status = "**β OAuth Configuration**: Space not configured for OAuth (missing `hf_oauth: true` in README.md)"
return f"""
### Authentication Status: Not Logged In
Please log in to access GAIA evaluation with Qwen models and LangGraph workflow.
{oauth_status}
**What you need:**
- HuggingFace login with `read` and `inference` permissions
- Access to Qwen 2.5 models via HF Inference API
- LangGraph multi-agent system capabilities
**OAuth Scopes**: Login requests inference scope for Qwen model access.
**Expected Performance**: 30%+ GAIA score with full LangGraph workflow and Qwen models.
**⚠️ No Fallbacks**: System requires proper authentication - no simplified responses.
"""
username = profile.username
oauth_token = getattr(profile, 'oauth_token', None) or getattr(profile, 'token', None)
# Try multiple methods to extract OAuth token
if not oauth_token:
for attr in ['access_token', 'id_token', 'bearer_token']:
token = getattr(profile, attr, None)
if token:
oauth_token = token
logger.info(f"π Found OAuth token via {attr}")
break
# If still no token, check if profile has any token-like attributes
if not oauth_token and hasattr(profile, '__dict__'):
token_attrs = [attr for attr in profile.__dict__.keys() if 'token' in attr.lower()]
if token_attrs:
logger.info(f"π Available token attributes: {token_attrs}")
# Try the first available token attribute
oauth_token = getattr(profile, token_attrs[0], None)
if oauth_token:
logger.info(f"π Using token from {token_attrs[0]}")
scope_info = check_oauth_scopes(oauth_token) if oauth_token else {
"logged_in": True,
"scopes": [],
"can_inference": False,
"can_read": False,
"user_info": {},
"message": "Logged in but no OAuth token found"
}
status_parts = [f"### π Authentication Status: Logged In as {username}"]
# Safely access user_info
user_info = scope_info.get("user_info", {})
if user_info and user_info.get("fullname"):
status_parts.append(f"**Full Name**: {user_info['fullname']}")
# HuggingFace Spaces OAuth Environment Status
if oauth_client_id:
if has_inference_scope:
status_parts.append("**π Space OAuth**: β
Configured with inference scope")
else:
status_parts.append("**π Space OAuth**: β οΈ Missing inference scope in README.md")
status_parts.append(f"**Available Scopes**: {oauth_scopes}")
else:
status_parts.append("**π Space OAuth**: β Not configured (`hf_oauth: true` missing)")
# Safely access scopes
scopes = scope_info.get("scopes", [])
status_parts.append(f"**Detected Token Scopes**: {', '.join(scopes) if scopes else 'None detected'}")
status_parts.append("")
status_parts.append("**System Capabilities:**")
# Safely access capabilities
can_inference = scope_info.get("can_inference", False)
can_read = scope_info.get("can_read", False)
if can_inference:
status_parts.extend([
"- β
**Qwen Model Access**: Full Qwen 2.5 model capabilities (7B/32B/72B)",
"- β
**High Performance**: 30%+ expected GAIA score",
"- β
**LangGraph Workflow**: Multi-agent orchestration with synthesis",
"- β
**Specialized Agents**: Web research, file processing, reasoning",
"- β
**Professional Tools**: Wikipedia, web search, calculator, file processor",
"- β
**Inference Access**: Full model generation capabilities"
])
else:
status_parts.extend([
"- β **No Qwen Model Access**: Insufficient OAuth permissions",
"- β **No LangGraph Workflow**: Requires inference permissions",
"- β **Limited Functionality**: Cannot process GAIA questions",
"- β **No Inference Access**: Read-only permissions detected"
])
if can_read:
status_parts.append("- β
**Profile Access**: Can read user information")
status_parts.extend([
"- β
**Manual Testing**: Individual question processing (if authenticated)",
"- β
**Official Evaluation**: GAIA benchmark submission (if authenticated)"
])
if not can_inference:
if not has_inference_scope:
status_parts.extend([
"",
"π§ **Space Configuration Issue**: Add inference scope to README.md:",
"```yaml",
"hf_oauth_scopes:",
" - inference-api",
"```",
"**After updating**: Space will restart and request proper scopes on next login."
])
status_parts.extend([
"",
"π **Authentication Required**: Your OAuth session lacks inference permissions.",
"**Solution**: Logout and login again to request full inference access.",
"**Alternative**: Set HF_TOKEN as a Space secret for guaranteed Qwen model access.",
"**Note**: System requires Qwen model access - no simplified fallbacks available."
])
# Add specific guidance if we couldn't find an OAuth token
if not oauth_token:
status_parts.extend([
"",
"π **OAuth Token Issue**: Could not extract OAuth token from your session.",
"**Troubleshooting**: Click 'π Debug OAuth' button above to investigate.",
"**Common Fix**: Logout and login again to refresh your OAuth session."
])
else:
status_parts.extend([
"",
"π **Excellent**: You have full inference access for optimal GAIA performance!",
"π€ **Ready**: LangGraph workflow with Qwen models fully operational."
])
return "\n".join(status_parts)
def run_and_submit_all(profile: gr.OAuthProfile | None):
"""
Fetches all questions from Unit 4 API, runs the GAIA Agent with LangGraph workflow,
and displays the results. Handles OAuth authentication at runtime.
"""
start_time = time.time()
# Initialize result logger
result_logger = GAIAResultLogger()
# Check OAuth environment configuration first
oauth_client_id = os.getenv("OAUTH_CLIENT_ID")
oauth_scopes = os.getenv("OAUTH_SCOPES")
if not oauth_client_id:
return "β OAuth not configured. Please add 'hf_oauth: true' to README.md", None, format_auth_status(None), None, None, None
# Accept both 'inference-api' and 'inference' as valid inference scopes
if not oauth_scopes or not ("inference-api" in oauth_scopes or "inference" in oauth_scopes):
return f"β Missing inference scope. Current scopes: {oauth_scopes}. Please add inference scope to README.md", None, format_auth_status(None), None, None, None
# Get space info for code submission
space_id = os.getenv("SPACE_ID")
# Priority order for token: 1) HF_TOKEN env var, 2) OAuth token from profile
hf_token = os.getenv("HF_TOKEN")
oauth_token = None
username = "unknown_user"
if hf_token:
logger.info("π― Using HF_TOKEN environment variable for Qwen model access")
oauth_token = hf_token
username = "hf_token_user"
elif profile:
username = f"{profile.username}"
# Try multiple ways to extract OAuth token
oauth_token = getattr(profile, 'oauth_token', None) or getattr(profile, 'token', None)
if not oauth_token:
for attr in ['access_token', 'id_token', 'bearer_token']:
token = getattr(profile, attr, None)
if token:
oauth_token = token
logger.info(f"π Found OAuth token via {attr}")
break
if oauth_token:
logger.info(f"β
User logged in: {username}, OAuth token extracted successfully")
# Test OAuth token validity
try:
headers = {"Authorization": f"Bearer {oauth_token}"}
test_response = requests.get("https://huggingface.co/api/whoami", headers=headers, timeout=5)
if test_response.status_code == 401:
logger.error("β OAuth token has insufficient scopes for Qwen model inference")
return "Authentication Error: Your OAuth token lacks inference permissions. Please logout and login again to refresh your OAuth session.", None, format_auth_status(profile), None, None, None
elif test_response.status_code == 200:
logger.info("β
OAuth token validated successfully")
else:
logger.warning(f"β οΈ OAuth token validation returned {test_response.status_code}")
except Exception as e:
logger.warning(f"β οΈ Could not validate OAuth token: {e}")
else:
logger.warning(f"β οΈ User {username} logged in but no OAuth token found")
return f"OAuth Token Missing: Could not extract authentication token for user {username}. Please logout and login again.", None, format_auth_status(profile), None, None, None
else:
logger.error("β No authentication provided")
return "Authentication Required: Please login with HuggingFace. Your Space has OAuth configured but you need to login first.", None, format_auth_status(None), None, None, None
if not oauth_token:
return "Authentication Required: Valid token with inference permissions needed for Qwen model access.", None, format_auth_status(profile), None, None, None
# Get authentication status for display
auth_status = format_auth_status(profile)
api_url = DEFAULT_API_URL
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
# 1. Instantiate GAIA Agent with LangGraph workflow
try:
logger.info("π Creating GAIA Agent with LangGraph workflow and Qwen models")
agent = GAIAAgentApp.create_with_oauth_token(oauth_token)
if not agent.initialized:
return "System Error: GAIA Agent failed to initialize with LangGraph workflow", None, auth_status, None, None, None
logger.info("β
GAIA Agent initialized successfully")
except ValueError as ve:
logger.error(f"Authentication error: {ve}")
return f"Authentication Error: {ve}", None, auth_status, None, None, None
except RuntimeError as re:
logger.error(f"System initialization error: {re}")
return f"System Error: {re}", None, auth_status, None, None, None
except Exception as e:
logger.error(f"Unexpected error initializing agent: {e}")
return f"Unexpected Error: {e}. Please check your authentication and try again.", None, auth_status, None, None, None
# Agent code URL
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Local Development"
logger.info(f"Agent code URL: {agent_code}")
# 2. Fetch Questions
logger.info(f"Fetching questions from: {questions_url}")
try:
response = requests.get(questions_url, timeout=15)
response.raise_for_status()
questions_data = response.json()
if not questions_data:
logger.error("Fetched questions list is empty.")
return "Fetched questions list is empty or invalid format.", None, auth_status, None, None, None
logger.info(f"Fetched {len(questions_data)} questions.")
except requests.exceptions.RequestException as e:
logger.error(f"Error fetching questions: {e}")
return f"Error fetching questions: {e}", None, auth_status, None, None, None
except requests.exceptions.JSONDecodeError as e:
logger.error(f"Error decoding JSON response from questions endpoint: {e}")
return f"Error decoding server response for questions: {e}", None, auth_status, None, None, None
except Exception as e:
logger.error(f"An unexpected error occurred fetching questions: {e}")
return f"An unexpected error occurred fetching questions: {e}", None, auth_status, None, None, None
# 3. Run GAIA Agent on questions
results_log = []
answers_payload = []
logger.info(f"π€ Running GAIA Agent on {len(questions_data)} questions with LangGraph workflow...")
for i, item in enumerate(questions_data, 1):
task_id = item.get("task_id")
question_text = item.get("question")
if not task_id or question_text is None:
logger.warning(f"Skipping item with missing task_id or question: {item}")
continue
logger.info(f"Processing question {i}/{len(questions_data)}: {task_id}")
try:
submitted_answer = agent(question_text)
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
results_log.append({
"Task ID": task_id,
"Question": question_text[:100] + "..." if len(question_text) > 100 else question_text,
"Submitted Answer": submitted_answer[:200] + "..." if len(submitted_answer) > 200 else submitted_answer
})
logger.info(f"β
Question {i} processed successfully")
except Exception as e:
logger.error(f"Error running GAIA agent on task {task_id}: {e}")
error_answer = f"AGENT ERROR: {str(e)}"
answers_payload.append({"task_id": task_id, "submitted_answer": error_answer})
results_log.append({
"Task ID": task_id,
"Question": question_text[:100] + "..." if len(question_text) > 100 else question_text,
"Submitted Answer": error_answer
})
if not answers_payload:
logger.error("GAIA Agent did not produce any answers to submit.")
return "GAIA Agent did not produce any answers to submit.", pd.DataFrame(results_log), auth_status, None, None, None
# 4. Prepare and submit results
submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
status_update = f"π GAIA Agent finished processing {len(answers_payload)} questions. Submitting results for user '{username}'..."
logger.info(status_update)
# 5. Submit to Unit 4 API
logger.info(f"π€ Submitting {len(answers_payload)} answers to: {submit_url}")
try:
response = requests.post(submit_url, json=submission_data, timeout=120)
logger.info(f"π¨ Unit 4 API response status: {response.status_code}")
response.raise_for_status()
result_data = response.json()
# Log the actual response for debugging
logger.info(f"π Unit 4 API response data: {result_data}")
# Calculate execution time
execution_time = time.time() - start_time
# 6. Log results to files
logger.info("π Logging evaluation results...")
logged_files = result_logger.log_evaluation_results(
username=username,
questions_data=questions_data,
results_log=results_log,
final_result=result_data,
execution_time=execution_time
)
# Prepare download files
csv_file = logged_files.get("csv")
json_file = logged_files.get("json")
summary_file = logged_files.get("summary")
final_status = (
f"π GAIA Agent Evaluation Complete!\n"
f"π€ User: {result_data.get('username')}\n"
f"π Overall Score: {result_data.get('score', 'N/A')}% "
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
f"β±οΈ Execution Time: {execution_time:.2f} seconds\n"
f"π¬ API Response: {result_data.get('message', 'No message received.')}\n\n"
f"π Results saved to {len([f for f in [csv_file, json_file, summary_file] if f])} files for download."
)
logger.info("β
GAIA evaluation completed successfully")
results_df = pd.DataFrame(results_log)
return final_status, results_df, auth_status, csv_file, json_file, summary_file
except requests.exceptions.HTTPError as e:
error_detail = f"Server responded with status {e.response.status_code}."
try:
error_json = e.response.json()
error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
except requests.exceptions.JSONDecodeError:
error_detail += f" Response: {e.response.text[:500]}"
status_message = f"β Submission Failed: {error_detail}"
logger.error(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df, auth_status, None, None, None
except requests.exceptions.Timeout:
status_message = "β Submission Failed: The request timed out."
logger.error(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df, auth_status, None, None, None
except requests.exceptions.RequestException as e:
status_message = f"β Submission Failed: Network error - {e}"
logger.error(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df, auth_status, None, None, None
except Exception as e:
status_message = f"β An unexpected error occurred during submission: {e}"
logger.error(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df, auth_status, None, None, None
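# Sketch of the Unit 4 API exchange performed above (field names taken from the
# submission code in this function; concrete values are illustrative):
#
#   POST {DEFAULT_API_URL}/submit with JSON body:
#   {
#       "username": "demo_user",
#       "agent_code": "https://huggingface.co/spaces/<space_id>/tree/main",
#       "answers": [{"task_id": "abc123", "submitted_answer": "42"}, ...]
#   }
#
#   Response fields read by this function: "username", "score", "correct_count",
#   "total_attempted", "message".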
def create_interface():
"""Create the Gradio interface with both Unit 4 API and manual testing"""
# Note: We don't initialize GAIAAgentApp here since it requires authentication
# Each request will create its own authenticated instance
# Custom CSS for better styling
css = """
/* Base styling for proper contrast */
.gradio-container {
color: #3c3c3c !important;
background-color: #faf9f7 !important;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
}
/* Fix all text elements EXCEPT buttons */
.gradio-container *:not(button):not(.gr-button):not(.gr-button-primary):not(.gr-button-secondary),
.gradio-container *:not(button):not(.gr-button):not(.gr-button-primary):not(.gr-button-secondary)::before,
.gradio-container *:not(button):not(.gr-button):not(.gr-button-primary):not(.gr-button-secondary)::after {
color: #3c3c3c !important;
}
/* Headers */
.gradio-container h1,
.gradio-container h2,
.gradio-container h3,
.gradio-container h4,
.gradio-container h5,
.gradio-container h6 {
color: #2c2c2c !important;
font-weight: 600 !important;
}
/* Paragraphs and text content */
.gradio-container p,
.gradio-container div:not(.gr-button):not(.gr-button-primary):not(.gr-button-secondary),
.gradio-container span:not(.gr-button):not(.gr-button-primary):not(.gr-button-secondary),
.gradio-container label {
color: #3c3c3c !important;
}
/* Input fields */
.gradio-container input,
.gradio-container textarea {
color: #3c3c3c !important;
background-color: #ffffff !important;
border: 1px solid #d4c4b0 !important;
border-radius: 6px !important;
}
/* Buttons - Subtle professional styling */
.gradio-container button,
.gradio-container .gr-button,
.gradio-container .gr-button-primary,
.gradio-container .gr-button-secondary,
.gradio-container button *,
.gradio-container .gr-button *,
.gradio-container .gr-button-primary *,
.gradio-container .gr-button-secondary * {
color: #3c3c3c !important;
font-weight: 500 !important;
text-shadow: none !important;
border-radius: 6px !important;
border: 1px solid #d4c4b0 !important;
transition: all 0.2s ease !important;
}
.gradio-container .gr-button-primary,
.gradio-container button[variant="primary"] {
background: #f5f3f0 !important;
color: #3c3c3c !important;
border: 1px solid #d4c4b0 !important;
padding: 8px 16px !important;
border-radius: 6px !important;
}
.gradio-container .gr-button-secondary,
.gradio-container button[variant="secondary"] {
background: #ffffff !important;
color: #3c3c3c !important;
border: 1px solid #d4c4b0 !important;
padding: 8px 16px !important;
border-radius: 6px !important;
}
.gradio-container button:not([variant]) {
background: #f8f6f3 !important;
color: #3c3c3c !important;
border: 1px solid #d4c4b0 !important;
padding: 8px 16px !important;
border-radius: 6px !important;
}
/* Button hover states - subtle changes */
.gradio-container button:hover,
.gradio-container .gr-button:hover,
.gradio-container .gr-button-primary:hover {
background: #ede9e4 !important;
color: #2c2c2c !important;
border: 1px solid #c4b49f !important;
transform: translateY(-1px) !important;
box-shadow: 0 2px 4px rgba(0,0,0,0.08) !important;
}
.gradio-container .gr-button-secondary:hover {
background: #f5f3f0 !important;
color: #2c2c2c !important;
border: 1px solid #c4b49f !important;
transform: translateY(-1px) !important;
box-shadow: 0 2px 4px rgba(0,0,0,0.08) !important;
}
/* Login button styling */
.gradio-container .gr-button:contains("Login"),
.gradio-container button:contains("Login") {
background: #e8e3dc !important;
color: #3c3c3c !important;
border: 1px solid #d4c4b0 !important;
}
/* Markdown content */
.gradio-container .gr-markdown,
.gradio-container .markdown,
.gradio-container .prose {
color: #3c3c3c !important;
background-color: transparent !important;
}
/* Special content boxes */
.container {
max-width: 1200px;
margin: auto;
padding: 20px;
background-color: #faf9f7 !important;
color: #3c3c3c !important;
}
.output-markdown {
font-size: 16px;
line-height: 1.6;
color: #3c3c3c !important;
background-color: #faf9f7 !important;
}
.details-box {
background-color: #f5f3f0 !important;
padding: 15px;
border-radius: 8px;
margin: 10px 0;
color: #3c3c3c !important;
border: 1px solid #e0d5c7 !important;
}
.reasoning-box {
background-color: #ffffff !important;
padding: 20px;
border: 1px solid #e0d5c7 !important;
border-radius: 8px;
color: #3c3c3c !important;
}
.unit4-section {
background-color: #f0ede8 !important;
padding: 20px;
border-radius: 8px;
margin: 20px 0;
color: #4a4035 !important;
border: 1px solid #d4c4b0 !important;
}
.unit4-section h1,
.unit4-section h2,
.unit4-section h3,
.unit4-section p,
.unit4-section div:not(button):not(.gr-button) {
color: #4a4035 !important;
}
/* Login section */
.oauth-login {
background: #f5f3f0 !important;
padding: 10px;
border-radius: 5px;
margin: 10px 0;
color: #3c3c3c !important;
border: 1px solid #e0d5c7 !important;
}
/* Tables */
.gradio-container table,
.gradio-container th,
.gradio-container td {
color: #3c3c3c !important;
background-color: #ffffff !important;
border: 1px solid #e0d5c7 !important;
}
.gradio-container th {
background-color: #f5f3f0 !important;
font-weight: 600 !important;
}
/* Examples and other interactive elements */
.gradio-container .gr-examples,
.gradio-container .gr-file,
.gradio-container .gr-textbox,
.gradio-container .gr-checkbox {
color: #3c3c3c !important;
background-color: #ffffff !important;
}
/* Fix any remaining text contrast issues */
.gradio-container .gr-form,
.gradio-container .gr-panel,
.gradio-container .gr-block {
color: #3c3c3c !important;
background-color: transparent !important;
}
/* Ensure proper text on light backgrounds */
.gradio-container .light,
.gradio-container [data-theme="light"] {
color: #3c3c3c !important;
background-color: #faf9f7 !important;
}
/* Override any problematic inline styles but preserve button colors */
.gradio-container [style*="color: white"]:not(button):not(.gr-button) {
color: #3c3c3c !important;
}
/* Professional spacing and shadows */
.gradio-container .gr-box {
box-shadow: 0 1px 3px rgba(0,0,0,0.1) !important;
border-radius: 8px !important;
}
/* Override any remaining purple/blue elements */
.gradio-container .gr-textbox,
.gradio-container .gr-dropdown,
.gradio-container .gr-number,
.gradio-container .gr-slider {
background-color: #ffffff !important;
border: 1px solid #d4c4b0 !important;
color: #3c3c3c !important;
}
/* Force override any Gradio default styling */
.gradio-container * {
background-color: inherit !important;
}
.gradio-container *[style*="background-color: rgb(239, 68, 68)"],
.gradio-container *[style*="background-color: rgb(59, 130, 246)"],
.gradio-container *[style*="background-color: rgb(147, 51, 234)"],
.gradio-container *[style*="background-color: rgb(16, 185, 129)"] {
background-color: #f5f3f0 !important;
color: #3c3c3c !important;
border: 1px solid #d4c4b0 !important;
}
/* Loading states */
.gradio-container .loading {
background-color: #f5f3f0 !important;
color: #6b5d4f !important;
}
/* Progress bars */
.gradio-container .gr-progress {
background-color: #f5f3f0 !important;
}
.gradio-container .gr-progress-bar {
background-color: #a08b73 !important;
}
"""
# OAuth scopes the app expects for full inference access.
# Note: this dict is informational only; it is not passed to gr.Blocks or
# gr.LoginButton below, so the scopes actually granted come from the Space's
# OAuth configuration at login time.
oauth_config = {
"scopes": ["read", "inference"], # Request both read and inference access
}
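# A minimal sketch (an assumption, not taken from this repo) of how such scopes are
# usually requested for a Gradio app on Hugging Face Spaces: OAuth is enabled in the
# Space's README.md front-matter rather than in Python code, e.g.:
#
#   ---
#   title: GAIA Agent System
#   sdk: gradio
#   hf_oauth: true
#   hf_oauth_scopes:
#     - inference-api
#   ---
#
# With that metadata in place, gr.LoginButton() below handles the login flow and
# Gradio injects gr.OAuthToken / gr.OAuthProfile into event handlers automatically.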
with gr.Blocks(css=css, title="GAIA Agent System", theme=gr.themes.Soft()) as interface:
# Header
gr.Markdown("""
# GAIA Agent System
**Advanced Multi-Agent AI System for GAIA Benchmark Questions**
This system uses **Qwen 2.5 models (7B/32B/72B)** with specialized agents orchestrated through
**LangGraph** to provide accurate, well-reasoned answers to complex questions.
**Architecture**: Router → Specialized Agents → Tools → Synthesizer → Final Answer
""")
# Unit 4 API Section
with gr.Row(elem_classes=["unit4-section"]):
with gr.Column():
gr.Markdown("""
## GAIA Benchmark Evaluation
**Official Unit 4 API Integration with LangGraph Workflow**
Run the complete GAIA Agent system using Qwen 2.5 models and LangGraph multi-agent
orchestration on all benchmark questions and submit results to the official API.
**System Requirements:**
1. **Authentication**: HuggingFace login with `read` and `inference` permissions
2. **Models**: Access to Qwen 2.5 models (7B/32B/72B) via HF Inference API
3. **Workflow**: LangGraph multi-agent system with specialized tools
**Instructions:**
1. Log in to your Hugging Face account using the button below (**full inference access required**)
2. Click 'Run GAIA Evaluation & Submit All Answers' to process all questions
3. View your official score and detailed results
⚠️ **Note**: This may take several minutes to process all questions with the multi-agent system.
💡 **OAuth Scopes**: Login requests both `read` and `inference` permissions
for Qwen model access and optimal performance (30%+ GAIA score expected).
🚫 **No Fallbacks**: The system requires proper authentication; simplified responses are not available.
""")
# Authentication status section
auth_status_display = gr.Markdown(
"""
### Authentication Status: Not Logged In
Please log in to access GAIA evaluation with Qwen models and LangGraph workflow.
**What you need:**
- HuggingFace login with `read` and `inference` permissions
- Access to Qwen 2.5 models via HF Inference API
- LangGraph multi-agent system capabilities
**Expected Performance**: 30%+ GAIA score with full LangGraph workflow and Qwen models.
""",
elem_classes=["oauth-login"]
)
# Add Gradio's built-in OAuth login button
gr.LoginButton()
with gr.Row():
refresh_auth_button = gr.Button("Refresh Auth Status", variant="secondary", scale=1)
unit4_run_button = gr.Button(
"Run GAIA Evaluation & Submit All Answers",
variant="primary",
scale=2
)
unit4_status_output = gr.Textbox(
label="Evaluation Status / Submission Result",
lines=5,
interactive=False
)
unit4_results_table = gr.DataFrame(
label="Questions and GAIA Agent Answers",
wrap=True
)
# Download section
gr.Markdown("### π Download Results")
gr.Markdown("After evaluation completes, download your results in different formats:")
with gr.Row():
csv_download = gr.File(
label="π CSV Results",
visible=False,
interactive=False
)
json_download = gr.File(
label="π Detailed JSON",
visible=False,
interactive=False
)
summary_download = gr.File(
label="π Summary Report",
visible=False,
interactive=False
)
gr.Markdown("---")
# Manual Testing Section
gr.Markdown("""
## 🧪 Manual Question Testing
Test individual questions with detailed analysis using **Qwen models** and **LangGraph workflow**.
**Features:**
- **Qwen 2.5 Models**: Intelligent tier selection (7B → 32B → 72B) based on complexity
- **LangGraph Orchestration**: Multi-agent workflow with synthesis
- **Specialized Agents**: Router, web research, file processing, mathematical reasoning
- **Detailed Analysis**: Processing details, confidence scores, cost tracking
""")
with gr.Row():
with gr.Column(scale=2):
# Input section
gr.Markdown("### π Input")
question_input = gr.Textbox(
label="Question",
placeholder="Enter your question here...",
lines=3,
max_lines=10
)
file_input = gr.File(
label="Optional File Upload",
file_types=[".txt", ".csv", ".xlsx", ".py", ".json", ".png", ".jpg", ".mp3", ".wav"],
type="filepath"
)
with gr.Row():
show_reasoning = gr.Checkbox(
label="Show detailed reasoning",
value=False
)
submit_btn = gr.Button(
"π Process Question",
variant="secondary"
)
# Examples
gr.Markdown("#### π‘ Example Questions")
example_questions = [
"How many studio albums were published by Mercedes Sosa between 2000 and 2009?",
"What is the capital of the country that has the most time zones?",
"Calculate the compound interest on $1000 at 5% annual rate compounded quarterly for 3 years",
"What is the square root of the sum of the first 10 prime numbers?",
"Who was the first person to walk on the moon and what year did it happen?",
"Compare the GDP of Japan and Germany in 2023 and tell me the difference",
]
examples = gr.Examples(
examples=example_questions,
inputs=[question_input],
cache_examples=False
)
with gr.Column(scale=3):
# Output section
gr.Markdown("### π Results")
answer_output = gr.Markdown(
label="Answer",
elem_classes=["output-markdown"]
)
details_output = gr.Markdown(
label="Processing Details",
elem_classes=["details-box"]
)
reasoning_output = gr.Markdown(
label="Detailed Reasoning",
visible=False,
elem_classes=["reasoning-box"]
)
# Event handlers for Unit 4 API - Using Gradio's built-in OAuth
def run_gaia_evaluation(oauth_token: gr.OAuthToken | None, profile: gr.OAuthProfile | None):
"""Run GAIA evaluation using Gradio's built-in OAuth"""
start_time = time.time()
# Initialize result logger
result_logger = GAIAResultLogger()
# Check authentication using Gradio's OAuth parameters
if oauth_token is None or profile is None:
return "β Authentication Required: Please login with HuggingFace to access GAIA evaluation.", None, None, None, None, None
username = profile.username if profile else "unknown_user"
hf_token = oauth_token.token if oauth_token else None
if not hf_token:
return "β OAuth Token Missing: Could not extract authentication token. Please logout and login again.", None, None, None, None, None
logger.info(f"β
Starting GAIA evaluation for user: {username}")
# Rest of the function exactly as in run_and_submit_all but using oauth_token.token
api_url = DEFAULT_API_URL
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
# Get space info for code submission
space_id = os.getenv("SPACE_ID")
# 1. Instantiate GAIA Agent with LangGraph workflow
try:
logger.info("π Creating GAIA Agent with LangGraph workflow and Qwen models")
agent = GAIAAgentApp.create_with_oauth_token(hf_token)
if not agent.initialized:
return "β System Error: GAIA Agent failed to initialize with LangGraph workflow", None, None, None, None, None
logger.info("β
GAIA Agent initialized successfully")
except ValueError as ve:
logger.error(f"Authentication error: {ve}")
return f"β Authentication Error: {ve}", None, None, None, None, None
except RuntimeError as re:
logger.error(f"System initialization error: {re}")
return f"β System Error: {re}", None, None, None, None, None
except Exception as e:
logger.error(f"Unexpected error initializing agent: {e}")
return f"β Unexpected Error: {e}. Please check your authentication and try again.", None, None, None, None, None
# Agent code URL
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Local Development"
logger.info(f"Agent code URL: {agent_code}")
# 2. Fetch Questions
logger.info(f"Fetching questions from: {questions_url}")
try:
response = requests.get(questions_url, timeout=15)
response.raise_for_status()
questions_data = response.json()
if not questions_data:
logger.error("Fetched questions list is empty.")
return "β Fetched questions list is empty or invalid format.", None, None, None, None, None
logger.info(f"β
Fetched {len(questions_data)} questions.")
except requests.exceptions.RequestException as e:
logger.error(f"Error fetching questions: {e}")
return f"β Error fetching questions: {e}", None, None, None, None, None
except Exception as e:
logger.error(f"An unexpected error occurred fetching questions: {e}")
return f"β An unexpected error occurred fetching questions: {e}", None, None, None, None, None
# 3. Run GAIA Agent on questions
results_log = []
answers_payload = []
logger.info(f"π€ Running GAIA Agent on {len(questions_data)} questions with LangGraph workflow...")
for i, item in enumerate(questions_data, 1):
task_id = item.get("task_id")
question_text = item.get("question")
if not task_id or question_text is None:
logger.warning(f"Skipping item with missing task_id or question: {item}")
continue
logger.info(f"Processing question {i}/{len(questions_data)}: {task_id}")
try:
submitted_answer = agent(question_text)
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
results_log.append({
"Task ID": task_id,
"Question": question_text[:100] + "..." if len(question_text) > 100 else question_text,
"Submitted Answer": submitted_answer[:200] + "..." if len(submitted_answer) > 200 else submitted_answer
})
logger.info(f"β
Question {i} processed successfully")
except Exception as e:
logger.error(f"Error running GAIA agent on task {task_id}: {e}")
error_answer = f"AGENT ERROR: {str(e)}"
answers_payload.append({"task_id": task_id, "submitted_answer": error_answer})
results_log.append({
"Task ID": task_id,
"Question": question_text[:100] + "..." if len(question_text) > 100 else question_text,
"Submitted Answer": error_answer
})
if not answers_payload:
logger.error("GAIA Agent did not produce any answers to submit.")
return "β GAIA Agent did not produce any answers to submit.", pd.DataFrame(results_log), None, None, None, None
# 4. Prepare and submit results
submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
status_update = f"π GAIA Agent finished processing {len(answers_payload)} questions. Submitting results for user '{username}'..."
logger.info(status_update)
# 5. Submit to Unit 4 API
logger.info(f"π€ Submitting {len(answers_payload)} answers to: {submit_url}")
try:
response = requests.post(submit_url, json=submission_data, timeout=120)
logger.info(f"π¨ Unit 4 API response status: {response.status_code}")
response.raise_for_status()
result_data = response.json()
# Log the actual response for debugging
logger.info(f"π Unit 4 API response data: {result_data}")
# Calculate execution time
execution_time = time.time() - start_time
# 6. Log results to files
logger.info("π Logging evaluation results...")
logged_files = result_logger.log_evaluation_results(
username=username,
questions_data=questions_data,
results_log=results_log,
final_result=result_data,
execution_time=execution_time
)
# Prepare download files
csv_file = logged_files.get("csv")
json_file = logged_files.get("json")
summary_file = logged_files.get("summary")
final_status = (
f"π GAIA Agent Evaluation Complete!\n"
f"π€ User: {result_data.get('username')}\n"
f"π Overall Score: {result_data.get('score', 'N/A')}% "
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
f"β±οΈ Execution Time: {execution_time:.2f} seconds\n"
f"π¬ API Response: {result_data.get('message', 'No message received.')}\n\n"
f"π Results saved to {len([f for f in [csv_file, json_file, summary_file] if f])} files for download."
)
logger.info("β
GAIA evaluation completed successfully")
results_df = pd.DataFrame(results_log)
# Update download file visibility and values
csv_update = gr.update(value=csv_file, visible=csv_file is not None)
json_update = gr.update(value=json_file, visible=json_file is not None)
summary_update = gr.update(value=summary_file, visible=summary_file is not None)
return final_status, results_df, csv_update, json_update, summary_update
except requests.exceptions.HTTPError as e:
error_detail = f"Server responded with status {e.response.status_code}."
try:
error_json = e.response.json()
error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
except requests.exceptions.JSONDecodeError:
error_detail += f" Response: {e.response.text[:500]}"
status_message = f"β Submission Failed: {error_detail}"
logger.error(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df, None, None, None
except Exception as e:
status_message = f"β An unexpected error occurred during submission: {e}"
logger.error(status_message)
results_df = pd.DataFrame(results_log)
return status_message, results_df, None, None, None
def update_auth_status(profile: gr.OAuthProfile | None):
"""Update authentication status display using Gradio's OAuth"""
if profile is None:
return """
### Authentication Status: Not Logged In
Please click the "Sign in with Hugging Face" button above to access GAIA evaluation.
**What you need:**
- HuggingFace login with `read` and `inference` permissions
- Access to Qwen 2.5 models via HF Inference API
- LangGraph multi-agent system capabilities
**Expected Performance**: 30%+ GAIA score with full LangGraph workflow and Qwen models.
"""
else:
return f"""
### Authentication Status: ✅ Logged In as {profile.username}
**✅ Ready for GAIA Evaluation!**
- ✅ **OAuth Profile**: {profile.name or profile.username}
- ✅ **Qwen Model Access**: Available via HF Inference API
- ✅ **LangGraph Workflow**: Multi-agent orchestration ready
- ✅ **Official Evaluation**: Click "Run GAIA Evaluation" to start
🎯 **Expected Results**: 30%+ GAIA score with full LangGraph workflow and Qwen models.
"""
# Set up automatic login state checking
interface.load(
fn=update_auth_status,
outputs=[auth_status_display]
)
unit4_run_button.click(
fn=run_gaia_evaluation,
inputs=[], # Gradio automatically injects OAuth parameters
outputs=[unit4_status_output, unit4_results_table, csv_download, json_download, summary_download]
)
# Refresh authentication status manually
refresh_auth_button.click(
fn=update_auth_status,
outputs=[auth_status_display]
)
# Event handlers for manual testing
def process_and_update(question, file_input, show_reasoning, oauth_token: gr.OAuthToken | None, profile: gr.OAuthProfile | None):
"""Process question with authentication check"""
if not question.strip():
return "β Please provide a question", "", "", gr.update(visible=False)
# Check for authentication - prioritize HF_TOKEN, then OAuth
hf_token = os.getenv("HF_TOKEN")
if not hf_token and (oauth_token is None or profile is None):
error_msg = """
## ❌ Authentication Required
**This system requires authentication to access Qwen models and LangGraph workflow.**
**How to authenticate:**
1. **Login with HuggingFace**: Use the "Sign in with Hugging Face" button above
2. **Use Official Evaluation**: Login via the GAIA Benchmark section above
3. **Get Token**: Visit https://huggingface.co/settings/tokens to create one with `inference` permissions
**Note**: Manual testing requires the same authentication as the official evaluation.
"""
return error_msg, "", "", gr.update(visible=False)
# Use HF_TOKEN if available, otherwise use OAuth token
auth_token = hf_token if hf_token else (oauth_token.token if oauth_token else None)
if not auth_token:
return "β No valid authentication token found", "", "", gr.update(visible=False)
try:
# Create authenticated app instance for this request
app = GAIAAgentApp(hf_token=auth_token)
# Process the question
answer, details, reasoning = app.process_question_detailed(question, file_input, show_reasoning)
# Format answer with markdown
formatted_answer = f"""
## 🎯 Answer
{answer}
"""
# Format details
formatted_details = f"""
## Processing Details
{details}
"""
# Show/hide reasoning based on checkbox
reasoning_visible = show_reasoning and reasoning.strip()
return (
formatted_answer,
formatted_details,
reasoning if reasoning_visible else "",
gr.update(visible=reasoning_visible)
)
except ValueError as ve:
error_msg = f"""
## ❌ Authentication Error
{str(ve)}
**Solution**: Please ensure your authentication has `inference` permissions.
"""
return error_msg, "", "", gr.update(visible=False)
except RuntimeError as runtime_err:  # renamed to avoid shadowing the `re` module
error_msg = f"""
## ❌ System Error
{str(runtime_err)}
**This may be due to:**
- Qwen model access issues
- HuggingFace Inference API unavailability
- Network connectivity problems
"""
return error_msg, "", "", gr.update(visible=False)
except Exception as e:
error_msg = f"""
## ❌ Unexpected Error
{str(e)}
**Please try again or contact support if the issue persists.**
"""
return error_msg, "", "", gr.update(visible=False)
submit_btn.click(
fn=process_and_update,
inputs=[question_input, file_input, show_reasoning],
# reasoning_output is listed twice on purpose: the third return value sets its
# text and the fourth (a gr.update) toggles its visibility.
outputs=[answer_output, details_output, reasoning_output, reasoning_output]
)
# Show/hide reasoning based on checkbox
show_reasoning.change(
fn=lambda show: gr.update(visible=show),
inputs=[show_reasoning],
outputs=[reasoning_output]
)
# Footer
gr.Markdown("""
---
### System Architecture
**LangGraph Multi-Agent Workflow:**
- **Router Agent**: Classifies questions and selects appropriate specialized agents (using 32B model for better accuracy)
- **Web Research Agent**: Multi-engine search with DuckDuckGo (primary), Tavily API (secondary), Wikipedia (fallback)
- **File Processing Agent**: Processes uploaded files (CSV, images, code, audio)
- **Reasoning Agent**: Handles mathematical calculations and logical reasoning
- **Synthesizer Agent**: Combines results from multiple agents into final answers
**Models Used**: Qwen 2.5 (7B/32B/72B) with intelligent tier selection for optimal cost/performance
**Tools Available**: Multi-engine web search (DuckDuckGo + Tavily + Wikipedia), mathematical calculator, multi-format file processor
### Performance Metrics
- **Success Rate**: 30%+ expected on GAIA benchmark with full authentication
- **Average Response Time**: ~3-5 seconds per question depending on complexity
- **Cost Efficiency**: $0.01-0.40 per question depending on model tier selection
- **Architecture**: Multi-agent LangGraph orchestration with intelligent synthesis
- **Reliability**: Robust error handling and graceful degradation within workflow
- **Web Search**: 3-tier search system (DuckDuckGo → Tavily → Wikipedia) with smart query optimization
### 🎯 Authentication Requirements
- **HF_TOKEN Environment Variable**: Best performance with full access to Qwen models
- **OAuth with Inference Scope**: Full access to Qwen 2.5 models via HuggingFace Inference API
- **Optional**: TAVILY_API_KEY for enhanced web search capabilities (1,000 free searches/month)
- **No Fallback Options**: System requires proper authentication for multi-agent functionality
""")
return interface
def main():
"""Main application entry point"""
# Check if running in production (HuggingFace Spaces)
is_production = (
os.getenv("GRADIO_ENV") == "production" or
os.getenv("SPACE_ID") is not None or
os.getenv("SPACE_HOST") is not None
)
# Check for space environment variables
space_host = os.getenv("SPACE_HOST")
space_id = os.getenv("SPACE_ID")
if space_host:
logger.info(f"β
SPACE_HOST found: {space_host}")
logger.info(f" Runtime URL: https://{space_host}")
else:
logger.info("βΉοΈ SPACE_HOST environment variable not found (running locally?).")
if space_id:
logger.info(f"β
SPACE_ID found: {space_id}")
logger.info(f" Repo URL: https://huggingface.co/spaces/{space_id}")
else:
logger.info("βΉοΈ SPACE_ID environment variable not found (running locally?).")
logger.info(f"π§ Production mode: {is_production}")
# Create interface
interface = create_interface()
# Launch configuration with OAuth scopes
if is_production:
# Production settings for HuggingFace Spaces with OAuth
launch_kwargs = {
"server_name": "0.0.0.0",
"server_port": int(os.getenv("PORT", 7860)),
"share": False,
"debug": False,
"show_error": True,
"quiet": False,
"favicon_path": None,
"auth": None,
# Configure OAuth with full inference access
"auth_message": "Login with HuggingFace for full inference access to models",
}
logger.info(f"π Launching in PRODUCTION mode on 0.0.0.0:{launch_kwargs['server_port']}")
logger.info("π OAuth configured to request 'read' and 'inference' scopes")
else:
# Development settings
launch_kwargs = {
"server_name": "127.0.0.1",
"server_port": 7860,
"share": False,
"debug": True,
"show_error": True,
"quiet": False,
"favicon_path": None,
"inbrowser": True,
"auth_message": "Login with HuggingFace for full inference access to models",
}
logger.info("π§ Launching in DEVELOPMENT mode on 127.0.0.1:7860")
# Set OAuth environment variables for HuggingFace Spaces
if is_production:
# Record the desired OAuth scopes in the environment for visibility.
# Note: on HF Spaces the scopes actually granted at login are declared in the
# Space's README.md metadata (hf_oauth / hf_oauth_scopes), not at runtime, so
# these variables are informational rather than authoritative.
os.environ["OAUTH_SCOPES"] = "read,inference"
os.environ["OAUTH_CLIENT_ID"] = os.getenv("OAUTH_CLIENT_ID", "")
logger.info("OAuth environment configured for inference access")
interface.launch(**launch_kwargs)
def process_question_with_gaia_agent(question_text: str, question_id: str = None,
file_name: str = None, file_content: bytes = None) -> Dict[str, Any]:
"""
Process a GAIA question using enhanced multi-phase planning workflow
"""
try:
logger.info(f"π Processing GAIA question with enhanced workflow: {question_text[:100]}...")
# Create GAIA agent with enhanced capabilities
llm_client = QwenClient()
gaia_agent = GAIAAgentApp.create_with_qwen_client(llm_client)
# Use enhanced LangGraph workflow with multi-step planning
result = gaia_agent.process_with_langgraph(question_text, question_id)
# Enhanced result formatting for GAIA compliance
enhanced_result = {
"question_id": question_id or "unknown",
"question": question_text,
"answer": result["answer"],
"confidence": result["confidence"],
"reasoning": result["reasoning"],
"cost_estimate": result["cost"],
"processing_time": result["processing_time"],
"workflow_type": "enhanced_multi_phase",
"processing_details": result["processing_details"],
"agent_results": result["agent_results"],
"success": len(result["errors"]) == 0,
"error_messages": result["errors"]
}
return enhanced_result
except Exception as e:
error_msg = f"Enhanced GAIA processing failed: {str(e)}"
logger.error(error_msg)
return {
"question_id": question_id or "unknown",
"question": question_text,
"answer": "Enhanced processing failed",
"confidence": 0.0,
"reasoning": error_msg,
"cost_estimate": 0.0,
"processing_time": 0.0,
"workflow_type": "enhanced_multi_phase_failed",
"processing_details": {"error": error_msg},
"agent_results": [],
"success": False,
"error_messages": [error_msg]
}
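# Example usage (a sketch, kept as comments so it is not executed on import):
# calling the helper above directly for a single question; the keys unpacked
# here mirror the dict returned by process_question_with_gaia_agent().
#
#   result = process_question_with_gaia_agent(
#       "What is the capital of the country that has the most time zones?",
#       question_id="demo-001",
#   )
#   print(result["answer"], result["confidence"], result["cost_estimate"])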
if __name__ == "__main__":
main() |