{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "code",
"source": [
"# Install required packages\n",
"# NOTE: use %pip (not !pip) so the packages are installed into the\n",
"# environment of the running kernel rather than whatever `pip` is on PATH.\n",
"%pip install gradio langchain openai python-dotenv sqlalchemy pandas numpy matplotlib seaborn pydantic langchain_openai chromadb tiktoken"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "0FO3eb2vNuhz",
"outputId": "08c18148-e281-4cb5-fd92-b42189a4727a"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Collecting gradio\n",
" Downloading gradio-5.25.2-py3-none-any.whl.metadata (16 kB)\n",
"Requirement already satisfied: langchain in /usr/local/lib/python3.11/dist-packages (0.3.23)\n",
"Requirement already satisfied: openai in /usr/local/lib/python3.11/dist-packages (1.75.0)\n",
"Collecting python-dotenv\n",
" Downloading python_dotenv-1.1.0-py3-none-any.whl.metadata (24 kB)\n",
"Requirement already satisfied: sqlalchemy in /usr/local/lib/python3.11/dist-packages (2.0.40)\n",
"Requirement already satisfied: pandas in /usr/local/lib/python3.11/dist-packages (2.2.2)\n",
"Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (2.0.2)\n",
"Requirement already satisfied: matplotlib in /usr/local/lib/python3.11/dist-packages (3.10.0)\n",
"Requirement already satisfied: seaborn in /usr/local/lib/python3.11/dist-packages (0.13.2)\n",
"Requirement already satisfied: pydantic in /usr/local/lib/python3.11/dist-packages (2.11.3)\n",
"Collecting langchain_openai\n",
" Downloading langchain_openai-0.3.14-py3-none-any.whl.metadata (2.3 kB)\n",
"Collecting chromadb\n",
" Downloading chromadb-1.0.6-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (6.9 kB)\n",
"Collecting tiktoken\n",
" Downloading tiktoken-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (6.7 kB)\n",
"Collecting aiofiles<25.0,>=22.0 (from gradio)\n",
" Downloading aiofiles-24.1.0-py3-none-any.whl.metadata (10 kB)\n",
"Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (4.9.0)\n",
"Collecting fastapi<1.0,>=0.115.2 (from gradio)\n",
" Downloading fastapi-0.115.12-py3-none-any.whl.metadata (27 kB)\n",
"Collecting ffmpy (from gradio)\n",
" Downloading ffmpy-0.5.0-py3-none-any.whl.metadata (3.0 kB)\n",
"Collecting gradio-client==1.8.0 (from gradio)\n",
" Downloading gradio_client-1.8.0-py3-none-any.whl.metadata (7.1 kB)\n",
"Collecting groovy~=0.1 (from gradio)\n",
" Downloading groovy-0.1.2-py3-none-any.whl.metadata (6.1 kB)\n",
"Requirement already satisfied: httpx>=0.24.1 in /usr/local/lib/python3.11/dist-packages (from gradio) (0.28.1)\n",
"Requirement already satisfied: huggingface-hub>=0.28.1 in /usr/local/lib/python3.11/dist-packages (from gradio) (0.30.2)\n",
"Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (3.1.6)\n",
"Requirement already satisfied: markupsafe<4.0,>=2.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (3.0.2)\n",
"Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (3.10.16)\n",
"Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from gradio) (24.2)\n",
"Requirement already satisfied: pillow<12.0,>=8.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (11.1.0)\n",
"Collecting pydub (from gradio)\n",
" Downloading pydub-0.25.1-py2.py3-none-any.whl.metadata (1.4 kB)\n",
"Collecting python-multipart>=0.0.18 (from gradio)\n",
" Downloading python_multipart-0.0.20-py3-none-any.whl.metadata (1.8 kB)\n",
"Requirement already satisfied: pyyaml<7.0,>=5.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (6.0.2)\n",
"Collecting ruff>=0.9.3 (from gradio)\n",
" Downloading ruff-0.11.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (25 kB)\n",
"Collecting safehttpx<0.2.0,>=0.1.6 (from gradio)\n",
" Downloading safehttpx-0.1.6-py3-none-any.whl.metadata (4.2 kB)\n",
"Collecting semantic-version~=2.0 (from gradio)\n",
" Downloading semantic_version-2.10.0-py2.py3-none-any.whl.metadata (9.7 kB)\n",
"Collecting starlette<1.0,>=0.40.0 (from gradio)\n",
" Downloading starlette-0.46.2-py3-none-any.whl.metadata (6.2 kB)\n",
"Collecting tomlkit<0.14.0,>=0.12.0 (from gradio)\n",
" Downloading tomlkit-0.13.2-py3-none-any.whl.metadata (2.7 kB)\n",
"Requirement already satisfied: typer<1.0,>=0.12 in /usr/local/lib/python3.11/dist-packages (from gradio) (0.15.2)\n",
"Requirement already satisfied: typing-extensions~=4.0 in /usr/local/lib/python3.11/dist-packages (from gradio) (4.13.2)\n",
"Collecting uvicorn>=0.14.0 (from gradio)\n",
" Downloading uvicorn-0.34.2-py3-none-any.whl.metadata (6.5 kB)\n",
"Requirement already satisfied: fsspec in /usr/local/lib/python3.11/dist-packages (from gradio-client==1.8.0->gradio) (2025.3.2)\n",
"Requirement already satisfied: websockets<16.0,>=10.0 in /usr/local/lib/python3.11/dist-packages (from gradio-client==1.8.0->gradio) (15.0.1)\n",
"Requirement already satisfied: langchain-core<1.0.0,>=0.3.51 in /usr/local/lib/python3.11/dist-packages (from langchain) (0.3.52)\n",
"Requirement already satisfied: langchain-text-splitters<1.0.0,>=0.3.8 in /usr/local/lib/python3.11/dist-packages (from langchain) (0.3.8)\n",
"Requirement already satisfied: langsmith<0.4,>=0.1.17 in /usr/local/lib/python3.11/dist-packages (from langchain) (0.3.31)\n",
"Requirement already satisfied: requests<3,>=2 in /usr/local/lib/python3.11/dist-packages (from langchain) (2.32.3)\n",
"Requirement already satisfied: distro<2,>=1.7.0 in /usr/local/lib/python3.11/dist-packages (from openai) (1.9.0)\n",
"Requirement already satisfied: jiter<1,>=0.4.0 in /usr/local/lib/python3.11/dist-packages (from openai) (0.9.0)\n",
"Requirement already satisfied: sniffio in /usr/local/lib/python3.11/dist-packages (from openai) (1.3.1)\n",
"Requirement already satisfied: tqdm>4 in /usr/local/lib/python3.11/dist-packages (from openai) (4.67.1)\n",
"Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.11/dist-packages (from sqlalchemy) (3.2.0)\n",
"Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas) (2.8.2)\n",
"Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas) (2025.2)\n",
"Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas) (2025.2)\n",
"Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (1.3.2)\n",
"Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (0.12.1)\n",
"Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (4.57.0)\n",
"Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (1.4.8)\n",
"Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (3.2.3)\n",
"Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.11/dist-packages (from pydantic) (0.7.0)\n",
"Requirement already satisfied: pydantic-core==2.33.1 in /usr/local/lib/python3.11/dist-packages (from pydantic) (2.33.1)\n",
"Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.11/dist-packages (from pydantic) (0.4.0)\n",
"Collecting langchain-core<1.0.0,>=0.3.51 (from langchain)\n",
" Downloading langchain_core-0.3.55-py3-none-any.whl.metadata (5.9 kB)\n",
"Collecting build>=1.0.3 (from chromadb)\n",
" Downloading build-1.2.2.post1-py3-none-any.whl.metadata (6.5 kB)\n",
"Collecting chroma-hnswlib==0.7.6 (from chromadb)\n",
" Downloading chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (252 bytes)\n",
"Collecting fastapi<1.0,>=0.115.2 (from gradio)\n",
" Downloading fastapi-0.115.9-py3-none-any.whl.metadata (27 kB)\n",
"Collecting posthog>=2.4.0 (from chromadb)\n",
" Downloading posthog-3.25.0-py2.py3-none-any.whl.metadata (3.0 kB)\n",
"Collecting onnxruntime>=1.14.1 (from chromadb)\n",
" Downloading onnxruntime-1.21.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl.metadata (4.5 kB)\n",
"Requirement already satisfied: opentelemetry-api>=1.2.0 in /usr/local/lib/python3.11/dist-packages (from chromadb) (1.32.1)\n",
"Collecting opentelemetry-exporter-otlp-proto-grpc>=1.2.0 (from chromadb)\n",
" Downloading opentelemetry_exporter_otlp_proto_grpc-1.32.1-py3-none-any.whl.metadata (2.5 kB)\n",
"Collecting opentelemetry-instrumentation-fastapi>=0.41b0 (from chromadb)\n",
" Downloading opentelemetry_instrumentation_fastapi-0.53b1-py3-none-any.whl.metadata (2.2 kB)\n",
"Requirement already satisfied: opentelemetry-sdk>=1.2.0 in /usr/local/lib/python3.11/dist-packages (from chromadb) (1.32.1)\n",
"Requirement already satisfied: tokenizers>=0.13.2 in /usr/local/lib/python3.11/dist-packages (from chromadb) (0.21.1)\n",
"Collecting pypika>=0.48.9 (from chromadb)\n",
" Downloading PyPika-0.48.9.tar.gz (67 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.3/67.3 kB\u001b[0m \u001b[31m3.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
" Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
" Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
"Collecting overrides>=7.3.1 (from chromadb)\n",
" Downloading overrides-7.7.0-py3-none-any.whl.metadata (5.8 kB)\n",
"Requirement already satisfied: importlib-resources in /usr/local/lib/python3.11/dist-packages (from chromadb) (6.5.2)\n",
"Requirement already satisfied: grpcio>=1.58.0 in /usr/local/lib/python3.11/dist-packages (from chromadb) (1.71.0)\n",
"Collecting bcrypt>=4.0.1 (from chromadb)\n",
" Downloading bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl.metadata (10 kB)\n",
"Collecting kubernetes>=28.1.0 (from chromadb)\n",
" Downloading kubernetes-32.0.1-py2.py3-none-any.whl.metadata (1.7 kB)\n",
"Requirement already satisfied: tenacity>=8.2.3 in /usr/local/lib/python3.11/dist-packages (from chromadb) (9.1.2)\n",
"Collecting mmh3>=4.0.1 (from chromadb)\n",
" Downloading mmh3-5.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (16 kB)\n",
"Requirement already satisfied: rich>=10.11.0 in /usr/local/lib/python3.11/dist-packages (from chromadb) (13.9.4)\n",
"Requirement already satisfied: jsonschema>=4.19.0 in /usr/local/lib/python3.11/dist-packages (from chromadb) (4.23.0)\n",
"Collecting starlette<1.0,>=0.40.0 (from gradio)\n",
" Downloading starlette-0.45.3-py3-none-any.whl.metadata (6.3 kB)\n",
"Requirement already satisfied: regex>=2022.1.18 in /usr/local/lib/python3.11/dist-packages (from tiktoken) (2024.11.6)\n",
"Requirement already satisfied: idna>=2.8 in /usr/local/lib/python3.11/dist-packages (from anyio<5.0,>=3.0->gradio) (3.10)\n",
"Collecting pyproject_hooks (from build>=1.0.3->chromadb)\n",
" Downloading pyproject_hooks-1.2.0-py3-none-any.whl.metadata (1.3 kB)\n",
"Requirement already satisfied: certifi in /usr/local/lib/python3.11/dist-packages (from httpx>=0.24.1->gradio) (2025.1.31)\n",
"Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.11/dist-packages (from httpx>=0.24.1->gradio) (1.0.8)\n",
"Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.11/dist-packages (from httpcore==1.*->httpx>=0.24.1->gradio) (0.14.0)\n",
"Requirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.28.1->gradio) (3.18.0)\n",
"Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=4.19.0->chromadb) (25.3.0)\n",
"Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=4.19.0->chromadb) (2024.10.1)\n",
"Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=4.19.0->chromadb) (0.36.2)\n",
"Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.11/dist-packages (from jsonschema>=4.19.0->chromadb) (0.24.0)\n",
"Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.11/dist-packages (from kubernetes>=28.1.0->chromadb) (1.17.0)\n",
"Requirement already satisfied: google-auth>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from kubernetes>=28.1.0->chromadb) (2.38.0)\n",
"Requirement already satisfied: websocket-client!=0.40.0,!=0.41.*,!=0.42.*,>=0.32.0 in /usr/local/lib/python3.11/dist-packages (from kubernetes>=28.1.0->chromadb) (1.8.0)\n",
"Requirement already satisfied: requests-oauthlib in /usr/local/lib/python3.11/dist-packages (from kubernetes>=28.1.0->chromadb) (2.0.0)\n",
"Requirement already satisfied: oauthlib>=3.2.2 in /usr/local/lib/python3.11/dist-packages (from kubernetes>=28.1.0->chromadb) (3.2.2)\n",
"Requirement already satisfied: urllib3>=1.24.2 in /usr/local/lib/python3.11/dist-packages (from kubernetes>=28.1.0->chromadb) (2.3.0)\n",
"Collecting durationpy>=0.7 (from kubernetes>=28.1.0->chromadb)\n",
" Downloading durationpy-0.9-py3-none-any.whl.metadata (338 bytes)\n",
"Requirement already satisfied: jsonpatch<2.0,>=1.33 in /usr/local/lib/python3.11/dist-packages (from langchain-core<1.0.0,>=0.3.51->langchain) (1.33)\n",
"Requirement already satisfied: requests-toolbelt<2.0.0,>=1.0.0 in /usr/local/lib/python3.11/dist-packages (from langsmith<0.4,>=0.1.17->langchain) (1.0.0)\n",
"Requirement already satisfied: zstandard<0.24.0,>=0.23.0 in /usr/local/lib/python3.11/dist-packages (from langsmith<0.4,>=0.1.17->langchain) (0.23.0)\n",
"Collecting coloredlogs (from onnxruntime>=1.14.1->chromadb)\n",
" Downloading coloredlogs-15.0.1-py2.py3-none-any.whl.metadata (12 kB)\n",
"Requirement already satisfied: flatbuffers in /usr/local/lib/python3.11/dist-packages (from onnxruntime>=1.14.1->chromadb) (25.2.10)\n",
"Requirement already satisfied: protobuf in /usr/local/lib/python3.11/dist-packages (from onnxruntime>=1.14.1->chromadb) (5.29.4)\n",
"Requirement already satisfied: sympy in /usr/local/lib/python3.11/dist-packages (from onnxruntime>=1.14.1->chromadb) (1.13.1)\n",
"Requirement already satisfied: deprecated>=1.2.6 in /usr/local/lib/python3.11/dist-packages (from opentelemetry-api>=1.2.0->chromadb) (1.2.18)\n",
"Requirement already satisfied: importlib-metadata<8.7.0,>=6.0 in /usr/local/lib/python3.11/dist-packages (from opentelemetry-api>=1.2.0->chromadb) (8.6.1)\n",
"Requirement already satisfied: googleapis-common-protos~=1.52 in /usr/local/lib/python3.11/dist-packages (from opentelemetry-exporter-otlp-proto-grpc>=1.2.0->chromadb) (1.70.0)\n",
"Collecting opentelemetry-exporter-otlp-proto-common==1.32.1 (from opentelemetry-exporter-otlp-proto-grpc>=1.2.0->chromadb)\n",
" Downloading opentelemetry_exporter_otlp_proto_common-1.32.1-py3-none-any.whl.metadata (1.9 kB)\n",
"Collecting opentelemetry-proto==1.32.1 (from opentelemetry-exporter-otlp-proto-grpc>=1.2.0->chromadb)\n",
" Downloading opentelemetry_proto-1.32.1-py3-none-any.whl.metadata (2.4 kB)\n",
"Collecting opentelemetry-instrumentation-asgi==0.53b1 (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb)\n",
" Downloading opentelemetry_instrumentation_asgi-0.53b1-py3-none-any.whl.metadata (2.1 kB)\n",
"Collecting opentelemetry-instrumentation==0.53b1 (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb)\n",
" Downloading opentelemetry_instrumentation-0.53b1-py3-none-any.whl.metadata (6.8 kB)\n",
"Requirement already satisfied: opentelemetry-semantic-conventions==0.53b1 in /usr/local/lib/python3.11/dist-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb) (0.53b1)\n",
"Collecting opentelemetry-util-http==0.53b1 (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb)\n",
" Downloading opentelemetry_util_http-0.53b1-py3-none-any.whl.metadata (2.6 kB)\n",
"Requirement already satisfied: wrapt<2.0.0,>=1.0.0 in /usr/local/lib/python3.11/dist-packages (from opentelemetry-instrumentation==0.53b1->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb) (1.17.2)\n",
"Collecting asgiref~=3.0 (from opentelemetry-instrumentation-asgi==0.53b1->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb)\n",
" Downloading asgiref-3.8.1-py3-none-any.whl.metadata (9.3 kB)\n",
"Collecting monotonic>=1.5 (from posthog>=2.4.0->chromadb)\n",
" Downloading monotonic-1.6-py2.py3-none-any.whl.metadata (1.5 kB)\n",
"Collecting backoff>=1.10.0 (from posthog>=2.4.0->chromadb)\n",
" Downloading backoff-2.2.1-py3-none-any.whl.metadata (14 kB)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests<3,>=2->langchain) (3.4.1)\n",
"Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.11/dist-packages (from rich>=10.11.0->chromadb) (3.0.0)\n",
"Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.11/dist-packages (from rich>=10.11.0->chromadb) (2.18.0)\n",
"Requirement already satisfied: click>=8.0.0 in /usr/local/lib/python3.11/dist-packages (from typer<1.0,>=0.12->gradio) (8.1.8)\n",
"Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.11/dist-packages (from typer<1.0,>=0.12->gradio) (1.5.4)\n",
"Collecting httptools>=0.6.3 (from uvicorn[standard]>=0.18.3->chromadb)\n",
" Downloading httptools-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (3.6 kB)\n",
"Collecting uvloop!=0.15.0,!=0.15.1,>=0.14.0 (from uvicorn[standard]>=0.18.3->chromadb)\n",
" Downloading uvloop-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.9 kB)\n",
"Collecting watchfiles>=0.13 (from uvicorn[standard]>=0.18.3->chromadb)\n",
" Downloading watchfiles-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.9 kB)\n",
"Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.11/dist-packages (from google-auth>=1.0.1->kubernetes>=28.1.0->chromadb) (5.5.2)\n",
"Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.11/dist-packages (from google-auth>=1.0.1->kubernetes>=28.1.0->chromadb) (0.4.2)\n",
"Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.11/dist-packages (from google-auth>=1.0.1->kubernetes>=28.1.0->chromadb) (4.9.1)\n",
"Requirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.11/dist-packages (from importlib-metadata<8.7.0,>=6.0->opentelemetry-api>=1.2.0->chromadb) (3.21.0)\n",
"Requirement already satisfied: jsonpointer>=1.9 in /usr/local/lib/python3.11/dist-packages (from jsonpatch<2.0,>=1.33->langchain-core<1.0.0,>=0.3.51->langchain) (3.0.0)\n",
"Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.11/dist-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->chromadb) (0.1.2)\n",
"Collecting humanfriendly>=9.1 (from coloredlogs->onnxruntime>=1.14.1->chromadb)\n",
" Downloading humanfriendly-10.0-py2.py3-none-any.whl.metadata (9.2 kB)\n",
"Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.11/dist-packages (from sympy->onnxruntime>=1.14.1->chromadb) (1.3.0)\n",
"Requirement already satisfied: pyasn1<0.7.0,>=0.6.1 in /usr/local/lib/python3.11/dist-packages (from pyasn1-modules>=0.2.1->google-auth>=1.0.1->kubernetes>=28.1.0->chromadb) (0.6.1)\n",
"Downloading gradio-5.25.2-py3-none-any.whl (46.9 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.9/46.9 MB\u001b[0m \u001b[31m21.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading gradio_client-1.8.0-py3-none-any.whl (322 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m322.2/322.2 kB\u001b[0m \u001b[31m24.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading python_dotenv-1.1.0-py3-none-any.whl (20 kB)\n",
"Downloading langchain_openai-0.3.14-py3-none-any.whl (62 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.4/62.4 kB\u001b[0m \u001b[31m5.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading chromadb-1.0.6-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (18.3 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m18.3/18.3 MB\u001b[0m \u001b[31m86.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.4 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m81.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading fastapi-0.115.9-py3-none-any.whl (94 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m94.9/94.9 kB\u001b[0m \u001b[31m8.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading tiktoken-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 MB\u001b[0m \u001b[31m60.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading aiofiles-24.1.0-py3-none-any.whl (15 kB)\n",
"Downloading bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl (284 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m284.2/284.2 kB\u001b[0m \u001b[31m18.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading build-1.2.2.post1-py3-none-any.whl (22 kB)\n",
"Downloading groovy-0.1.2-py3-none-any.whl (14 kB)\n",
"Downloading kubernetes-32.0.1-py2.py3-none-any.whl (2.0 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m80.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading langchain_core-0.3.55-py3-none-any.whl (434 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m434.1/434.1 kB\u001b[0m \u001b[31m31.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading mmh3-5.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (101 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m101.6/101.6 kB\u001b[0m \u001b[31m8.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading onnxruntime-1.21.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl (16.0 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m16.0/16.0 MB\u001b[0m \u001b[31m89.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading opentelemetry_exporter_otlp_proto_grpc-1.32.1-py3-none-any.whl (18 kB)\n",
"Downloading opentelemetry_exporter_otlp_proto_common-1.32.1-py3-none-any.whl (18 kB)\n",
"Downloading opentelemetry_proto-1.32.1-py3-none-any.whl (55 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m55.9/55.9 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading opentelemetry_instrumentation_fastapi-0.53b1-py3-none-any.whl (12 kB)\n",
"Downloading opentelemetry_instrumentation-0.53b1-py3-none-any.whl (30 kB)\n",
"Downloading opentelemetry_instrumentation_asgi-0.53b1-py3-none-any.whl (16 kB)\n",
"Downloading opentelemetry_util_http-0.53b1-py3-none-any.whl (7.3 kB)\n",
"Downloading overrides-7.7.0-py3-none-any.whl (17 kB)\n",
"Downloading posthog-3.25.0-py2.py3-none-any.whl (89 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m89.1/89.1 kB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading python_multipart-0.0.20-py3-none-any.whl (24 kB)\n",
"Downloading ruff-0.11.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (11.5 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m11.5/11.5 MB\u001b[0m \u001b[31m94.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading safehttpx-0.1.6-py3-none-any.whl (8.7 kB)\n",
"Downloading semantic_version-2.10.0-py2.py3-none-any.whl (15 kB)\n",
"Downloading starlette-0.45.3-py3-none-any.whl (71 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m71.5/71.5 kB\u001b[0m \u001b[31m5.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading tomlkit-0.13.2-py3-none-any.whl (37 kB)\n",
"Downloading uvicorn-0.34.2-py3-none-any.whl (62 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.5/62.5 kB\u001b[0m \u001b[31m5.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading ffmpy-0.5.0-py3-none-any.whl (6.0 kB)\n",
"Downloading pydub-0.25.1-py2.py3-none-any.whl (32 kB)\n",
"Downloading backoff-2.2.1-py3-none-any.whl (15 kB)\n",
"Downloading durationpy-0.9-py3-none-any.whl (3.5 kB)\n",
"Downloading httptools-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (459 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m459.8/459.8 kB\u001b[0m \u001b[31m33.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading monotonic-1.6-py2.py3-none-any.whl (8.2 kB)\n",
"Downloading uvloop-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.0 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.0/4.0 MB\u001b[0m \u001b[31m64.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading watchfiles-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (454 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m454.8/454.8 kB\u001b[0m \u001b[31m33.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading coloredlogs-15.0.1-py2.py3-none-any.whl (46 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.0/46.0 kB\u001b[0m \u001b[31m3.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hDownloading pyproject_hooks-1.2.0-py3-none-any.whl (10 kB)\n",
"Downloading asgiref-3.8.1-py3-none-any.whl (23 kB)\n",
"Downloading humanfriendly-10.0-py2.py3-none-any.whl (86 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m86.8/86.8 kB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hBuilding wheels for collected packages: pypika\n",
" Building wheel for pypika (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
" Created wheel for pypika: filename=pypika-0.48.9-py2.py3-none-any.whl size=53801 sha256=58297097b83f8ee9a1f1c261b94276ddde1fd9ecb4c6af88a884db4519668311\n",
" Stored in directory: /root/.cache/pip/wheels/a3/01/bd/4c40ceb9d5354160cb186dcc153360f4ab7eb23e2b24daf96d\n",
"Successfully built pypika\n",
"Installing collected packages: pypika, pydub, monotonic, durationpy, uvloop, uvicorn, tomlkit, semantic-version, ruff, python-multipart, python-dotenv, pyproject_hooks, overrides, opentelemetry-util-http, opentelemetry-proto, mmh3, humanfriendly, httptools, groovy, ffmpy, chroma-hnswlib, bcrypt, backoff, asgiref, aiofiles, watchfiles, tiktoken, starlette, posthog, opentelemetry-exporter-otlp-proto-common, coloredlogs, build, safehttpx, onnxruntime, kubernetes, gradio-client, fastapi, opentelemetry-instrumentation, langchain-core, gradio, opentelemetry-instrumentation-asgi, opentelemetry-exporter-otlp-proto-grpc, langchain_openai, opentelemetry-instrumentation-fastapi, chromadb\n",
" Attempting uninstall: langchain-core\n",
" Found existing installation: langchain-core 0.3.52\n",
" Uninstalling langchain-core-0.3.52:\n",
" Successfully uninstalled langchain-core-0.3.52\n",
"Successfully installed aiofiles-24.1.0 asgiref-3.8.1 backoff-2.2.1 bcrypt-4.3.0 build-1.2.2.post1 chroma-hnswlib-0.7.6 chromadb-1.0.6 coloredlogs-15.0.1 durationpy-0.9 fastapi-0.115.9 ffmpy-0.5.0 gradio-5.25.2 gradio-client-1.8.0 groovy-0.1.2 httptools-0.6.4 humanfriendly-10.0 kubernetes-32.0.1 langchain-core-0.3.55 langchain_openai-0.3.14 mmh3-5.1.0 monotonic-1.6 onnxruntime-1.21.1 opentelemetry-exporter-otlp-proto-common-1.32.1 opentelemetry-exporter-otlp-proto-grpc-1.32.1 opentelemetry-instrumentation-0.53b1 opentelemetry-instrumentation-asgi-0.53b1 opentelemetry-instrumentation-fastapi-0.53b1 opentelemetry-proto-1.32.1 opentelemetry-util-http-0.53b1 overrides-7.7.0 posthog-3.25.0 pydub-0.25.1 pypika-0.48.9 pyproject_hooks-1.2.0 python-dotenv-1.1.0 python-multipart-0.0.20 ruff-0.11.6 safehttpx-0.1.6 semantic-version-2.10.0 starlette-0.45.3 tiktoken-0.9.0 tomlkit-0.13.2 uvicorn-0.34.2 uvloop-0.21.0 watchfiles-1.0.5\n"
]
}
]
},
{
"cell_type": "markdown",
"source": [
"# Core system"
],
"metadata": {
"id": "WpW8pKv1Zi3r"
}
},
{
"cell_type": "code",
"source": [
"# core_system.py - Modified with fixed exam functionality\n",
"\n",
"import os\n",
"import json\n",
"import datetime\n",
"import time\n",
"from datetime import timedelta\n",
"from typing import List, Dict, Any, Optional\n",
"\n",
"# LLM Integration using LangChain\n",
"class LLMService:\n",
" def __init__(self, api_key):\n",
" self.api_key = api_key\n",
" # Changed from ChatOpenAI to ChatGroq\n",
" try:\n",
" from langchain_groq import ChatGroq\n",
" self.chat_model = ChatGroq(\n",
" model=\"llama3-70b-8192\", # Using a Groq compatible model\n",
" temperature=0.2,\n",
" groq_api_key=api_key\n",
" )\n",
" except ImportError:\n",
" # Fallback to direct API calls if langchain_groq is not available\n",
" import requests\n",
" self.chat_model = None\n",
"\n",
" def create_chain(self, template: str, output_key: str = \"result\"):\n",
" if self.chat_model:\n",
" from langchain.prompts import ChatPromptTemplate\n",
" from langchain.chains import LLMChain\n",
"\n",
" chat_prompt = ChatPromptTemplate.from_template(template)\n",
" return LLMChain(\n",
" llm=self.chat_model,\n",
" prompt=chat_prompt,\n",
" output_key=output_key,\n",
" verbose=True\n",
" )\n",
" return None\n",
"\n",
"    def get_completion(self, prompt: str) -> str:\n",
"        \"\"\"Send `prompt` to the LLM and return the response text.\n",
"\n",
"        Uses the LangChain ChatGroq client when available; otherwise calls\n",
"        the Groq HTTP API directly with `requests`.\n",
"        Raises Exception on a non-200 HTTP reply in the fallback path.\n",
"        \"\"\"\n",
"        if self.chat_model:\n",
"            # BUGFIX: pass the prompt as a template *variable* rather than\n",
"            # using it as the template string itself. The prompts built by\n",
"            # this class contain literal braces (e.g. the JSON example in\n",
"            # generate_exam_questions), which ChatPromptTemplate would try to\n",
"            # parse as placeholders. This also makes the {\"input\": ...} key\n",
"            # actually match a declared template variable, which LLMChain\n",
"            # validates.\n",
"            chain = self.create_chain(\"{input}\")\n",
"            response = chain.invoke({\"input\": prompt})\n",
"            return response[\"result\"]\n",
"        else:\n",
"            # Direct API call if langchain is not available\n",
"            import requests\n",
"            headers = {\n",
"                \"Authorization\": f\"Bearer {self.api_key}\",\n",
"                \"Content-Type\": \"application/json\"\n",
"            }\n",
"            data = {\n",
"                \"model\": \"llama3-70b-8192\",\n",
"                \"messages\": [{\"role\": \"user\", \"content\": prompt}],\n",
"                \"temperature\": 0.2,  # keep consistent with the ChatGroq client\n",
"                \"max_tokens\": 2048\n",
"            }\n",
"            response = requests.post(\n",
"                \"https://api.groq.com/openai/v1/chat/completions\",\n",
"                headers=headers,\n",
"                json=data,\n",
"                timeout=60  # avoid hanging indefinitely on network stalls\n",
"            )\n",
"\n",
"            if response.status_code == 200:\n",
"                return response.json()[\"choices\"][0][\"message\"][\"content\"]\n",
"            raise Exception(f\"API Error: {response.status_code} - {response.text}\")\n",
"\n",
" def generate_module_content(self, day: int, topic: str) -> str:\n",
" prompt = f\"\"\"\n",
" Create a comprehensive Python programming module for Day {day} covering {topic}.\n",
" The module should follow this structure in Markdown format:\n",
"\n",
" # [Module Title]\n",
"\n",
" ## Introduction\n",
" [A brief introduction to the day's topics]\n",
"\n",
" ## Section 1: [Section Title]\n",
" [Detailed explanation of concepts]\n",
"\n",
" ### Code Examples\n",
" ```python\n",
" # Example code with comments\n",
" ```\n",
"\n",
" ### Practice Exercises\n",
" [2-3 exercises with clear instructions]\n",
"\n",
" ## Section 2: [Section Title]\n",
" [Repeat the pattern for all relevant topics]\n",
"\n",
" Make sure the content is:\n",
" - Comprehensive but focused on the day's topic\n",
" - Includes clear examples with comments\n",
" - Has practice exercises that build skills progressively\n",
" - Uses proper Markdown formatting\n",
" \"\"\"\n",
" return self.get_completion(prompt)\n",
"\n",
"    def generate_exam_questions(self, day: int, topic: str, previous_mistakes: List[Dict] = None) -> List[Dict]:\n",
"        \"\"\"Generate exam questions for a day, adapted to past mistakes.\n",
"\n",
"        Args:\n",
"            day: Day number of the curriculum.\n",
"            topic: Topic description for the exam.\n",
"            previous_mistakes: Optional list of dicts with keys\n",
"                'question', 'user_answer', 'correct_answer'.\n",
"\n",
"        Returns:\n",
"            A list of question dicts; a single fallback question if the\n",
"            model output cannot be parsed as a JSON array.\n",
"        \"\"\"\n",
"        mistake_context = \"\"\n",
"        if previous_mistakes and len(previous_mistakes) > 0:\n",
"            # Fold up to 3 prior mistakes into the prompt for adaptive review\n",
"            mistakes = \"\\n\".join([\n",
"                f\"- Question: {m['question']}\\n  Wrong Answer: {m['user_answer']}\\n  Correct Answer: {m['correct_answer']}\"\n",
"                for m in previous_mistakes[:3]\n",
"            ])\n",
"            mistake_context = f\"\"\"\n",
"        Include variations of questions related to these previous mistakes:\n",
"        {mistakes}\n",
"        \"\"\"\n",
"\n",
"        prompt = f\"\"\"\n",
"        Create a 1-hour Python exam for Day {day} covering {topic}.\n",
"        {mistake_context}\n",
"\n",
"        Include 5 questions with a mix of:\n",
"        - Multiple-choice (4 options each)\n",
"        - Short-answer (requiring 1-3 lines of text)\n",
"        - Coding exercises (simple functions or snippets)\n",
"\n",
"        Return your response as a JSON array where each question is an object with these fields:\n",
"        - question_type: \"multiple-choice\", \"short-answer\", or \"coding\"\n",
"        - question_text: The full question text\n",
"        - options: Array of options (for multiple-choice only)\n",
"        - correct_answer: The correct answer or solution\n",
"        - explanation: Detailed explanation of the correct answer\n",
"        - difficulty: Number from 1 (easiest) to 5 (hardest)\n",
"\n",
"        Example:\n",
"        [\n",
"            {{\n",
"                \"question_type\": \"multiple-choice\",\n",
"                \"question_text\": \"What is the output of print(3 * '4' + '5')?\",\n",
"                \"options\": [\"12\", \"445\", \"4445\", \"Error\"],\n",
"                \"correct_answer\": \"4445\",\n",
"                \"explanation\": \"The * operator with a string repeats it, and + concatenates strings\",\n",
"                \"difficulty\": 2\n",
"            }},\n",
"            {{\n",
"                \"question_type\": \"coding\",\n",
"                \"question_text\": \"Write a function that returns the sum of all even numbers in a list.\",\n",
"                \"options\": null,\n",
"                \"correct_answer\": \"def sum_even(numbers):\\\\n    return sum(x for x in numbers if x % 2 == 0)\",\n",
"                \"explanation\": \"This solution uses a generator expression with the sum function to add only even numbers\",\n",
"                \"difficulty\": 3\n",
"            }}\n",
"        ]\n",
"\n",
"        ONLY return the valid JSON array. Do NOT include any explanatory text or code fences.\n",
"        \"\"\"\n",
"\n",
"        result = self.get_completion(prompt)\n",
"\n",
"        # Strip Markdown code fences the model sometimes adds despite\n",
"        # the instructions (handles both ``` and ```json openers)\n",
"        result = result.strip()\n",
"        if result.startswith(\"```\"):\n",
"            result = result.split(\"\\n\", 1)[1] if \"\\n\" in result else \"\"\n",
"        if result.rstrip().endswith(\"```\"):\n",
"            result = result.rstrip().rsplit(\"```\", 1)[0]\n",
"        result = result.strip()\n",
"\n",
"        try:\n",
"            questions = json.loads(result)\n",
"            # Accept a single question object by wrapping it in a list\n",
"            if isinstance(questions, dict):\n",
"                questions = [questions]\n",
"            if not isinstance(questions, list):\n",
"                raise ValueError(f\"Expected a JSON array, got {type(questions).__name__}\")\n",
"            return questions\n",
"        except (json.JSONDecodeError, ValueError) as e:\n",
"            print(f\"JSON decode error: {e}\")\n",
"            print(f\"Raw response: {result}\")\n",
"            # Fall back to creating a minimal structure\n",
"            return [{\"question_type\": \"short-answer\",\n",
"                     \"question_text\": \"There was an error generating questions. Please describe what you've learned today.\",\n",
"                     \"options\": None,\n",
"                     \"correct_answer\": \"Any reasonable summary\",\n",
"                     \"explanation\": \"This is a backup question\",\n",
"                     \"difficulty\": 1}]\n",
"\n",
"    def evaluate_answer(self, question: Dict, user_answer: str) -> Dict:\n",
"        \"\"\"Grade a single student answer with the LLM.\n",
"\n",
"        Args:\n",
"            question: Stored question dict (question_type, question_text,\n",
"                correct_answer, ...).\n",
"            user_answer: The student's raw answer text.\n",
"\n",
"        Returns:\n",
"            Dict with is_correct, feedback and correct_solution; a safe\n",
"            fallback dict if the model output cannot be parsed.\n",
"        \"\"\"\n",
"        prompt = f\"\"\"\n",
"        Grade this response to a Python programming question:\n",
"\n",
"        Question Type: {question[\"question_type\"]}\n",
"        Question: {question[\"question_text\"]}\n",
"        Correct Answer: {question[\"correct_answer\"]}\n",
"        Student's Answer: {user_answer}\n",
"\n",
"        Return your evaluation as a JSON object with these fields:\n",
"        - is_correct: boolean (true/false)\n",
"        - feedback: detailed explanation of what was correct/incorrect\n",
"        - correct_solution: the correct solution with explanation if the answer was wrong\n",
"\n",
"        For coding questions, be somewhat lenient - focus on logic correctness rather than exact syntax matching.\n",
"        For multiple choice, it must match the correct option.\n",
"        For short answer, assess if the key concepts are present and correct.\n",
"\n",
"        ONLY return the valid JSON object. Do NOT include any explanatory text.\n",
"        \"\"\"\n",
"\n",
"        result = self.get_completion(prompt)\n",
"\n",
"        # Strip Markdown code fences the model sometimes adds despite\n",
"        # the instructions (handles both ``` and ```json openers)\n",
"        result = result.strip()\n",
"        if result.startswith(\"```\"):\n",
"            result = result.split(\"\\n\", 1)[1] if \"\\n\" in result else \"\"\n",
"        if result.rstrip().endswith(\"```\"):\n",
"            result = result.rstrip().rsplit(\"```\", 1)[0]\n",
"        result = result.strip()\n",
"\n",
"        try:\n",
"            return json.loads(result)\n",
"        except json.JSONDecodeError as e:\n",
"            print(f\"JSON decode error: {e}\")\n",
"            print(f\"Raw response: {result}\")\n",
"            # Return a fallback response\n",
"            return {\n",
"                \"is_correct\": False,\n",
"                \"feedback\": \"There was an error evaluating your answer. Please try again.\",\n",
"                \"correct_solution\": question[\"correct_answer\"]\n",
"            }\n",
"\n",
" def answer_student_question(self, question: str, context: Optional[str] = None) -> str:\n",
" context_text = f\"Context from previous questions: {context}\\n\\n\" if context else \"\"\n",
"\n",
" prompt = f\"\"\"\n",
" {context_text}You are an expert Python tutor. Answer this student's question clearly with explanations and examples:\n",
"\n",
" {question}\n",
"\n",
" - Use code examples where appropriate\n",
" - Break down complex concepts step by step\n",
" - Be comprehensive but concise\n",
" - Use proper Markdown formatting for code\n",
" \"\"\"\n",
"\n",
" return self.get_completion(prompt)\n",
"\n",
"# Content Generator with simplified storage\n",
"class ContentGenerator:\n",
"    \"\"\"Generates modules/exams via the LLM and keeps results in memory.\n",
"\n",
"    Storage is intentionally simple: plain lists of dicts that mimic\n",
"    database tables (modules, questions, responses, chat_logs).\n",
"    \"\"\"\n",
"\n",
"    # Day number -> topic description for the 3-day curriculum.\n",
"    # Hoisted to a class constant so generate_module and generate_exam\n",
"    # always agree (previously this dict was duplicated in both methods).\n",
"    DAY_TOPICS = {\n",
"        1: \"Python fundamentals (variables, data types, control structures)\",\n",
"        2: \"Intermediate Python (functions, modules, error handling)\",\n",
"        3: \"Advanced Python (file I/O, object-oriented programming, key libraries)\"\n",
"    }\n",
"\n",
"    def __init__(self, api_key):\n",
"        self.llm_service = LLMService(api_key)\n",
"        # Simplified in-memory storage\n",
"        self.modules = []\n",
"        self.questions = []\n",
"        self.responses = []\n",
"        self.chat_logs = []\n",
"\n",
"    def generate_module(self, day: int) -> tuple:\n",
"        \"\"\"Generate and store the day's module; returns (content, module_id).\"\"\"\n",
"        topic = self.DAY_TOPICS.get(day, \"Python programming\")\n",
"\n",
"        content = self.llm_service.generate_module_content(day, topic)\n",
"\n",
"        # Extract the title from the first Markdown heading, if present\n",
"        title = f\"Day {day} Python Module\"\n",
"        if content.startswith(\"# \"):\n",
"            title_line = content.split(\"\\n\", 1)[0]\n",
"            title = title_line.replace(\"# \", \"\").strip()\n",
"\n",
"        # Save to in-memory storage; ids are simple 1-based counters\n",
"        module_id = len(self.modules) + 1\n",
"        self.modules.append({\n",
"            \"id\": module_id,\n",
"            \"day\": day,\n",
"            \"title\": title,\n",
"            \"content\": content,\n",
"            \"created_at\": datetime.datetime.utcnow()\n",
"        })\n",
"\n",
"        return content, module_id\n",
"\n",
"    def generate_exam(self, day: int, module_id: int, previous_mistakes: List = None) -> tuple:\n",
"        \"\"\"Generate and store exam questions for a module.\n",
"\n",
"        Returns:\n",
"            (questions_data, saved_questions): the raw LLM question dicts\n",
"            and the stored records (which additionally carry ids).\n",
"            Falls back to a single generic question on any error.\n",
"        \"\"\"\n",
"        topic = self.DAY_TOPICS.get(day, \"Python programming\")\n",
"\n",
"        # Generate questions for this day's topics\n",
"        try:\n",
"            questions_data = self.llm_service.generate_exam_questions(day, topic, previous_mistakes)\n",
"\n",
"            if not questions_data:\n",
"                raise ValueError(\"Failed to generate exam questions\")\n",
"\n",
"            saved_questions = []\n",
"            for q_data in questions_data:\n",
"                question_id = len(self.questions) + 1\n",
"\n",
"                question = {\n",
"                    \"id\": question_id,\n",
"                    \"module_id\": module_id,\n",
"                    \"question_type\": q_data[\"question_type\"],\n",
"                    \"question_text\": q_data[\"question_text\"],\n",
"                    \"options\": q_data.get(\"options\"),\n",
"                    \"correct_answer\": q_data[\"correct_answer\"],\n",
"                    \"explanation\": q_data[\"explanation\"],\n",
"                    \"difficulty\": q_data.get(\"difficulty\", 3)\n",
"                }\n",
"\n",
"                self.questions.append(question)\n",
"                saved_questions.append(question)\n",
"\n",
"            return questions_data, saved_questions\n",
"        except Exception as e:\n",
"            print(f\"Error generating exam: {str(e)}\")\n",
"            # Create a simple fallback question so the exam flow survives\n",
"            fallback_question = {\n",
"                \"question_type\": \"short-answer\",\n",
"                \"question_text\": f\"Explain a key concept you learned in Day {day} about {topic}.\",\n",
"                \"options\": None,\n",
"                \"correct_answer\": \"Any reasonable explanation\",\n",
"                \"explanation\": \"This is a fallback question due to an error in question generation\",\n",
"                \"difficulty\": 2\n",
"            }\n",
"\n",
"            question_id = len(self.questions) + 1\n",
"            question = {\n",
"                \"id\": question_id,\n",
"                \"module_id\": module_id,\n",
"                \"question_type\": fallback_question[\"question_type\"],\n",
"                \"question_text\": fallback_question[\"question_text\"],\n",
"                \"options\": fallback_question.get(\"options\"),\n",
"                \"correct_answer\": fallback_question[\"correct_answer\"],\n",
"                \"explanation\": fallback_question[\"explanation\"],\n",
"                \"difficulty\": fallback_question[\"difficulty\"]\n",
"            }\n",
"\n",
"            self.questions.append(question)\n",
"            return [fallback_question], [question]\n",
"\n",
"    def grade_response(self, question_id: int, user_answer: str) -> Dict:\n",
"        \"\"\"Grade one answer; records the response and returns the feedback dict.\"\"\"\n",
"        # Find question in memory\n",
"        question = next((q for q in self.questions if q[\"id\"] == question_id), None)\n",
"\n",
"        if not question:\n",
"            return {\"error\": \"Question not found\"}\n",
"\n",
"        try:\n",
"            feedback_data = self.llm_service.evaluate_answer(question, user_answer)\n",
"\n",
"            # Save response to in-memory storage\n",
"            response_id = len(self.responses) + 1\n",
"            response = {\n",
"                \"id\": response_id,\n",
"                \"question_id\": question_id,\n",
"                \"user_answer\": user_answer,\n",
"                \"is_correct\": feedback_data.get(\"is_correct\", False),\n",
"                \"feedback\": feedback_data.get(\"feedback\", \"\"),\n",
"                \"timestamp\": datetime.datetime.utcnow()\n",
"            }\n",
"            self.responses.append(response)\n",
"\n",
"            return feedback_data\n",
"        except Exception as e:\n",
"            print(f\"Error grading response: {str(e)}\")\n",
"            # Record the failed evaluation too, so progress stats stay complete\n",
"            response_id = len(self.responses) + 1\n",
"            response = {\n",
"                \"id\": response_id,\n",
"                \"question_id\": question_id,\n",
"                \"user_answer\": user_answer,\n",
"                \"is_correct\": False,\n",
"                \"feedback\": f\"Error evaluating answer: {str(e)}\",\n",
"                \"timestamp\": datetime.datetime.utcnow()\n",
"            }\n",
"            self.responses.append(response)\n",
"\n",
"            return {\n",
"                \"is_correct\": False,\n",
"                \"feedback\": f\"Error evaluating answer: {str(e)}\",\n",
"                \"correct_solution\": question[\"correct_answer\"]\n",
"            }\n",
"\n",
"    def get_previous_mistakes(self, day: int) -> List:\n",
"        \"\"\"Get mistakes from previous days to inform adaptive content\"\"\"\n",
"        if day <= 1:\n",
"            return []\n",
"\n",
"        previous_day = day - 1\n",
"\n",
"        # Find modules from previous day\n",
"        previous_modules = [m for m in self.modules if m[\"day\"] == previous_day]\n",
"\n",
"        if not previous_modules:\n",
"            return []\n",
"\n",
"        module_ids = [module[\"id\"] for module in previous_modules]\n",
"        questions = [q for q in self.questions if q[\"module_id\"] in module_ids]\n",
"\n",
"        if not questions:\n",
"            return []\n",
"\n",
"        question_ids = [question[\"id\"] for question in questions]\n",
"        incorrect_responses = [r for r in self.responses if r[\"question_id\"] in question_ids and not r[\"is_correct\"]]\n",
"\n",
"        mistakes = []\n",
"        for response in incorrect_responses:\n",
"            question = next((q for q in self.questions if q[\"id\"] == response[\"question_id\"]), None)\n",
"            if question:\n",
"                mistakes.append({\n",
"                    \"question\": question[\"question_text\"],\n",
"                    \"user_answer\": response[\"user_answer\"],\n",
"                    \"correct_answer\": question[\"correct_answer\"]\n",
"                })\n",
"\n",
"        return mistakes\n",
"\n",
"    def answer_question(self, user_question: str, related_question_id: Optional[int] = None) -> str:\n",
"        \"\"\"Answer a sandbox question and log the exchange.\"\"\"\n",
"        # Get context from related question if available\n",
"        context = None\n",
"        if related_question_id:\n",
"            question = next((q for q in self.questions if q[\"id\"] == related_question_id), None)\n",
"            if question:\n",
"                context = f\"Question: {question['question_text']}\\nCorrect Answer: {question['correct_answer']}\\nExplanation: {question['explanation']}\"\n",
"\n",
"        response = self.llm_service.answer_student_question(user_question, context)\n",
"\n",
"        # Log the interaction\n",
"        chat_log_id = len(self.chat_logs) + 1\n",
"        chat_log = {\n",
"            \"id\": chat_log_id,\n",
"            \"user_question\": user_question,\n",
"            \"ai_response\": response,\n",
"            \"related_question_id\": related_question_id,\n",
"            \"timestamp\": datetime.datetime.utcnow()\n",
"        }\n",
"        self.chat_logs.append(chat_log)\n",
"\n",
"        return response\n",
"\n",
"# Learning System Class\n",
"class LearningSystem:\n",
"    \"\"\"Top-level coordinator for the 3-day curriculum.\n",
"\n",
"    Tracks the current day, delegates content/exam generation to\n",
"    ContentGenerator, and formats exam papers, results and progress\n",
"    reports as Markdown for the UI.\n",
"    \"\"\"\n",
"\n",
"    def __init__(self, api_key):\n",
"        self.content_generator = ContentGenerator(api_key)\n",
"        self.current_day = 1\n",
"        self.current_module_id = None\n",
"        self.exam_start_time = None\n",
"        self.exam_in_progress = False\n",
"        self.exam_questions = []  # Stored question records (with ids)\n",
"        self.questions_data = []  # Raw question data for display\n",
"\n",
"    def generate_day_content(self):\n",
"        \"\"\"Generate the current day's module; returns its Markdown content.\"\"\"\n",
"        content, module_id = self.content_generator.generate_module(self.current_day)\n",
"        self.current_module_id = module_id\n",
"        return content\n",
"\n",
"    def start_exam(self):\n",
"        \"\"\"Create the current day's exam and return it formatted as Markdown.\"\"\"\n",
"        try:\n",
"            if not self.current_module_id:\n",
"                # Check if we already have a module for this day\n",
"                existing_module = next((m for m in self.content_generator.modules if m[\"day\"] == self.current_day), None)\n",
"                if existing_module:\n",
"                    self.current_module_id = existing_module[\"id\"]\n",
"                else:\n",
"                    # Generate the day's module; only its id is needed here\n",
"                    _, module_id = self.content_generator.generate_module(self.current_day)\n",
"                    self.current_module_id = module_id\n",
"\n",
"            # Get previous mistakes for adaptive learning\n",
"            previous_mistakes = self.content_generator.get_previous_mistakes(self.current_day)\n",
"\n",
"            # Generate exam questions\n",
"            self.questions_data, self.exam_questions = self.content_generator.generate_exam(\n",
"                self.current_day,\n",
"                self.current_module_id,\n",
"                previous_mistakes\n",
"            )\n",
"\n",
"            if not self.questions_data or not self.exam_questions:\n",
"                return \"Failed to generate exam questions. Please try again.\"\n",
"\n",
"            self.exam_start_time = datetime.datetime.now()\n",
"            self.exam_in_progress = True\n",
"\n",
"            # Format the exam for display\n",
"            exam_text = f\"# Day {self.current_day} Python Exam\\n\\n\"\n",
"            exam_text += f\"**Time Limit:** 1 hour\\n\"\n",
"            exam_text += f\"**Start Time:** {self.exam_start_time.strftime('%H:%M:%S')}\\n\"\n",
"            exam_text += f\"**End Time:** {(self.exam_start_time + timedelta(hours=1)).strftime('%H:%M:%S')}\\n\\n\"\n",
"\n",
"            # Add adaptive learning notice if applicable\n",
"            if previous_mistakes and len(previous_mistakes) > 0:\n",
"                exam_text += f\"**Note:** This exam includes questions based on topics you had difficulty with previously.\\n\\n\"\n",
"\n",
"            for i, question in enumerate(self.questions_data):\n",
"                exam_text += f\"## Question {i+1}: {question['question_type'].title()}\\n\\n\"\n",
"                exam_text += f\"{question['question_text']}\\n\\n\"\n",
"\n",
"                if question['question_type'] == \"multiple-choice\" and question.get('options'):\n",
"                    # Label options A, B, C, D ... via ASCII codes\n",
"                    for j, option in enumerate(question['options']):\n",
"                        exam_text += f\"- {chr(65+j)}. {option}\\n\"\n",
"\n",
"                exam_text += \"\\n\"\n",
"\n",
"            exam_text += \"## Instructions for submitting answers:\\n\\n\"\n",
"            exam_text += \"1. For multiple-choice questions, input the letter of your answer (A, B, C, or D)\\n\"\n",
"            exam_text += \"2. For short-answer questions, write your complete answer\\n\"\n",
"            exam_text += \"3. For coding questions, write your complete code solution\\n\"\n",
"            exam_text += \"4. **Separate each answer with two line breaks**\\n\\n\"\n",
"\n",
"            return exam_text\n",
"        except Exception as e:\n",
"            self.exam_in_progress = False\n",
"            return f\"Error starting exam: {str(e)}\"\n",
"\n",
"    def submit_exam(self, answers_text):\n",
"        \"\"\"Grade the submitted answers and return a Markdown results report.\n",
"\n",
"        Args:\n",
"            answers_text: All answers in one string, separated by blank lines\n",
"                (two line breaks), in question order.\n",
"        \"\"\"\n",
"        try:\n",
"            if not self.exam_in_progress:\n",
"                return \"No exam is currently in progress. Please start an exam first.\"\n",
"\n",
"            if not self.exam_questions:\n",
"                return \"No exam questions available. Please restart the exam.\"\n",
"\n",
"            # Check time (exceeding the limit is noted but not penalized)\n",
"            current_time = datetime.datetime.now()\n",
"            if current_time > self.exam_start_time + timedelta(hours=1):\n",
"                time_overrun = current_time - (self.exam_start_time + timedelta(hours=1))\n",
"                overrun_minutes = time_overrun.total_seconds() / 60\n",
"                time_notice = f\"Time limit exceeded by {overrun_minutes:.1f} minutes. Your answers are being processed anyway.\"\n",
"            else:\n",
"                time_notice = \"Exam completed within the time limit.\"\n",
"\n",
"            # Split answers by question (double newline separator)\n",
"            answers = [ans.strip() for ans in answers_text.split(\"\\n\\n\") if ans.strip()]\n",
"\n",
"            feedback_text = f\"# Day {self.current_day} Exam Results\\n\\n\"\n",
"            feedback_text += f\"{time_notice}\\n\\n\"\n",
"\n",
"            correct_count = 0\n",
"            total_evaluated = 0\n",
"\n",
"            # Pad with blanks so every question has a (possibly empty) answer\n",
"            while len(answers) < len(self.exam_questions):\n",
"                answers.append(\"\")\n",
"\n",
"            for i, question in enumerate(self.exam_questions):\n",
"                answer = answers[i]\n",
"\n",
"                # Handle empty answers\n",
"                if not answer:\n",
"                    feedback_text += f\"## Question {i+1}\\n\\n\"\n",
"                    feedback_text += \"**Your Answer:** No answer provided\\n\\n\"\n",
"                    feedback_text += \"**Result:** Incorrect\\n\\n\"\n",
"                    feedback_text += f\"**Correct Solution:** {question['correct_answer']}\\n\\n\"\n",
"                    total_evaluated += 1\n",
"                    continue\n",
"\n",
"                try:\n",
"                    # Grade the response\n",
"                    feedback = self.content_generator.grade_response(question[\"id\"], answer)\n",
"                    total_evaluated += 1\n",
"\n",
"                    # Format feedback\n",
"                    feedback_text += f\"## Question {i+1}\\n\\n\"\n",
"                    feedback_text += f\"**Your Answer:**\\n{answer}\\n\\n\"\n",
"                    feedback_text += f\"**Result:** {'✅ Correct' if feedback.get('is_correct', False) else '❌ Incorrect'}\\n\\n\"\n",
"                    feedback_text += f\"**Feedback:**\\n{feedback.get('feedback', '')}\\n\\n\"\n",
"\n",
"                    if feedback.get('is_correct', False):\n",
"                        correct_count += 1\n",
"                    else:\n",
"                        feedback_text += f\"**Correct Solution:**\\n{feedback.get('correct_solution', '')}\\n\\n\"\n",
"                except Exception as e:\n",
"                    feedback_text += f\"## Question {i+1}\\n\\n\"\n",
"                    feedback_text += f\"**Error grading answer:** {str(e)}\\n\\n\"\n",
"\n",
"            # Calculate score\n",
"            if total_evaluated > 0:\n",
"                score = correct_count / total_evaluated * 100\n",
"            else:\n",
"                score = 0\n",
"\n",
"            feedback_text += f\"# Final Score: {score:.1f}%\\n\\n\"\n",
"\n",
"            # Suggestions for improvement\n",
"            if score < 100:\n",
"                feedback_text += \"## Suggestions for Improvement\\n\\n\"\n",
"                if score < 60:\n",
"                    feedback_text += \"- Review the fundamental concepts again\\n\"\n",
"                    feedback_text += \"- Practice more with the code examples\\n\"\n",
"                    feedback_text += \"- Use the Q&A Sandbox to ask about difficult topics\\n\"\n",
"                elif score < 80:\n",
"                    feedback_text += \"- Focus on the specific areas where you made mistakes\\n\"\n",
"                    feedback_text += \"- Try rewriting the solutions for incorrect answers\\n\"\n",
"                else:\n",
"                    feedback_text += \"- Great job! Just a few minor issues to review\\n\"\n",
"                    feedback_text += \"- Look at the explanations for the few questions you missed\\n\"\n",
"            else:\n",
"                feedback_text += \"## Excellent Work!\\n\\n\"\n",
"                feedback_text += \"You've mastered today's content. Ready for the next day's material!\\n\"\n",
"\n",
"            self.exam_in_progress = False\n",
"            return feedback_text\n",
"        except Exception as e:\n",
"            self.exam_in_progress = False\n",
"            return f\"Error submitting exam: {str(e)}\"\n",
"\n",
"    def answer_sandbox_question(self, question):\n",
"        \"\"\"Forward a free-form Q&A Sandbox question to the content generator.\"\"\"\n",
"        return self.content_generator.answer_question(question)\n",
"\n",
"    def advance_to_next_day(self):\n",
"        \"\"\"Move to the next day (max day 3), resetting per-day exam state.\"\"\"\n",
"        if self.current_day < 3:\n",
"            self.current_day += 1\n",
"            self.current_module_id = None\n",
"            self.exam_questions = []\n",
"            return f\"Advanced to Day {self.current_day}.\"\n",
"        else:\n",
"            return \"You have completed the 3-day curriculum.\"\n",
"\n",
"    def get_learning_progress(self):\n",
"        \"\"\"Build a Markdown progress report across all three days.\"\"\"\n",
"        try:\n",
"            modules = self.content_generator.modules\n",
"            questions = self.content_generator.questions\n",
"            responses = self.content_generator.responses\n",
"\n",
"            total_questions = len(questions)\n",
"            answered_questions = len(responses)\n",
"            correct_answers = sum(1 for r in responses if r[\"is_correct\"])\n",
"\n",
"            if answered_questions > 0:\n",
"                accuracy = correct_answers / answered_questions * 100\n",
"            else:\n",
"                accuracy = 0\n",
"\n",
"            report = \"# Learning Progress Summary\\n\\n\"\n",
"            report += f\"## Overall Statistics\\n\"\n",
"            report += f\"- Total modules completed: {len(modules)}\\n\"\n",
"            report += f\"- Total questions attempted: {answered_questions}/{total_questions}\\n\"\n",
"            report += f\"- Overall accuracy: {accuracy:.1f}%\\n\\n\"\n",
"\n",
"            # Day-by-day progress with adaptive learning info\n",
"            for day in range(1, 4):\n",
"                day_modules = [m for m in modules if m[\"day\"] == day]\n",
"\n",
"                report += f\"## Day {day}: \"\n",
"                if day_modules:\n",
"                    report += f\"{day_modules[0]['title']}\\n\"\n",
"\n",
"                    day_questions = [q for q in questions if q[\"module_id\"] in [m[\"id\"] for m in day_modules]]\n",
"                    day_responses = [r for r in responses if r[\"question_id\"] in [q[\"id\"] for q in day_questions]]\n",
"\n",
"                    day_total = len(day_questions)\n",
"                    day_answered = len(day_responses)\n",
"                    day_correct = sum(1 for r in day_responses if r[\"is_correct\"])\n",
"\n",
"                    if day_answered > 0:\n",
"                        day_accuracy = day_correct / day_answered * 100\n",
"                        report += f\"- **Exam Score:** {day_accuracy:.1f}%\\n\"\n",
"                    else:\n",
"                        report += \"- **Exam:** Not taken yet\\n\"\n",
"\n",
"                    report += f\"- Questions attempted: {day_answered}/{day_total}\\n\"\n",
"\n",
"                    # Show adaptive learning details\n",
"                    if day > 1:\n",
"                        previous_mistakes = self.content_generator.get_previous_mistakes(day)\n",
"                        if previous_mistakes:\n",
"                            report += f\"- **Adaptive Learning:** {len(previous_mistakes)} topics from Day {day-1} reinforced\\n\"\n",
"\n",
"                    # Show exam results if available\n",
"                    if day_answered > 0:\n",
"                        report += \"### Exam Performance\\n\"\n",
"\n",
"                        # Group by question type\n",
"                        question_types = set(q[\"question_type\"] for q in day_questions)\n",
"                        for q_type in question_types:\n",
"                            type_questions = [q for q in day_questions if q[\"question_type\"] == q_type]\n",
"                            type_responses = [r for r in day_responses if r[\"question_id\"] in [q[\"id\"] for q in type_questions]]\n",
"                            type_correct = sum(1 for r in type_responses if r[\"is_correct\"])\n",
"\n",
"                            if type_responses:\n",
"                                type_accuracy = type_correct / len(type_responses) * 100\n",
"                                report += f\"- **{q_type.title()}:** {type_accuracy:.1f}% correct\\n\"\n",
"\n",
"                        # Common mistakes\n",
"                        incorrect_responses = [r for r in day_responses if not r[\"is_correct\"]]\n",
"                        if incorrect_responses:\n",
"                            report += \"\\n### Areas for Improvement\\n\"\n",
"\n",
"                            for resp in incorrect_responses[:3]:  # Show top 3 mistakes\n",
"                                question = next((q for q in questions if q[\"id\"] == resp[\"question_id\"]), None)\n",
"                                if question:\n",
"                                    report += f\"- **Question:** {question['question_text'][:100]}...\\n\"\n",
"                                    report += f\"  **Your Answer:** {resp['user_answer'][:100]}...\\n\"\n",
"                                    report += f\"  **Correct Answer:** {question['correct_answer'][:100]}...\\n\\n\"\n",
"                else:\n",
"                    report += \"Not started yet\\n\"\n",
"\n",
"                report += \"\\n\"\n",
"\n",
"            # Learning recommendations\n",
"            report += \"## Recommendations\\n\\n\"\n",
"            if correct_answers < answered_questions * 0.7:\n",
"                report += \"- Review the modules before moving to the next day\\n\"\n",
"                report += \"- Focus on practicing code examples\\n\"\n",
"                report += \"- Use the Q&A Sandbox to clarify difficult concepts\\n\"\n",
"            else:\n",
"                report += \"- Continue with the current pace\\n\"\n",
"                report += \"- Try to implement small projects using what you've learned\\n\"\n",
"\n",
"            return report\n",
"        except Exception as e:\n",
"            return f\"Error generating progress report: {str(e)}\""
],
"metadata": {
"id": "pJaXDk_Lmyg9"
},
"execution_count": 19,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
        "# Gradio UI"
],
"metadata": {
"id": "GFXvlb36fKOd"
}
},
{
"cell_type": "code",
"source": [
"# Gradio UI - Modified for Google Colab\n",
"import os\n",
"import gradio as gr\n",
"\n",
"# Note: We're not importing from core_system\n",
"# Instead, we'll use the classes already defined in the previous cell\n",
"\n",
"def create_interface():\n",
"    \"\"\"Build and return the Gradio Blocks UI for the learning system.\"\"\"\n",
"\n",
"    # System initialization section\n",
"    def initialize_system(api_key_value):\n",
"        \"\"\"Validate the API key, smoke-test the LLM and build the system.\n",
"\n",
"        Returns (status_markdown, main_interface_visibility_update, system).\n",
"        \"\"\"\n",
"        if not api_key_value or len(api_key_value) < 10:  # Basic validation\n",
"            return \"Please enter a valid API key.\", gr.update(visible=False), None\n",
"\n",
"        try:\n",
"            # Test API connection with a trivial prompt\n",
"            test_service = LLMService(api_key_value)\n",
"            test_response = test_service.get_completion(\"Say hello\")\n",
"\n",
"            if len(test_response) > 0:\n",
"                learning_system = LearningSystem(api_key_value)\n",
"                return \"✅ System initialized successfully! You can now use the learning system.\", gr.update(visible=True), learning_system\n",
"            else:\n",
"                return \"❌ API connection test failed. Please check your API key.\", gr.update(visible=False), None\n",
"        except Exception as e:\n",
"            return f\"❌ Error initializing system: {str(e)}\", gr.update(visible=False), None\n",
"\n",
"    with gr.Blocks(title=\"AI-Powered Python Learning System\", theme=\"soft\") as interface:\n",
"        # Per-session learning system, shared across all callbacks\n",
"        learning_system_state = gr.State(None)\n",
"\n",
"        # Header\n",
"        gr.Markdown(\n",
"            \"\"\"\n",
"            <div style=\"text-align: center; margin-bottom: 20px;\">\n",
"                <h1 style=\"color: #4a69bd; font-size: 2.5em;\">AI-Powered Python Learning System</h1>\n",
"                <p style=\"font-size: 1.2em; color: #444;\">Master Python programming with personalized AI tutoring</p>\n",
"            </div>\n",
"            \"\"\"\n",
"        )\n",
"\n",
"        # API Key input - outside the tabs\n",
"        with gr.Row():\n",
"            # Try to get API key from environment variable\n",
"            API_KEY = os.environ.get(\"GROQ_API_KEY\", \"\")\n",
"            api_key_input = gr.Textbox(\n",
"                label=\"Enter your Groq API Key\",\n",
"                placeholder=\"gsk_...\",\n",
"                type=\"password\",\n",
"                value=API_KEY  # Use environment variable if available\n",
"            )\n",
"            init_btn = gr.Button(\"Initialize System\", variant=\"primary\")\n",
"\n",
"        init_status = gr.Markdown(\"Enter your Groq API key and click 'Initialize System' to begin.\")\n",
"\n",
"        # Main interface container - hidden until initialized\n",
"        with gr.Column(visible=False) as main_interface:\n",
"            with gr.Tabs():\n",
"                # Content & Learning tab\n",
"                with gr.Tab(\"Content & Learning\"):\n",
"                    with gr.Row():\n",
"                        day_display = gr.Markdown(\"## Current Day: 1\")\n",
"\n",
"                    with gr.Row():\n",
"                        generate_content_btn = gr.Button(\"Generate Today's Content\", variant=\"primary\")\n",
"                        next_day_btn = gr.Button(\"Advance to Next Day\", variant=\"secondary\")\n",
"\n",
"                    content_display = gr.Markdown(\"Click 'Generate Today's Content' to begin.\")\n",
"\n",
"                # Exam tab\n",
"                with gr.Tab(\"Exam\"):\n",
"                    with gr.Row():\n",
"                        start_exam_btn = gr.Button(\"Start Exam\", variant=\"primary\")\n",
"\n",
"                    exam_display = gr.Markdown(\"Click 'Start Exam' to begin the assessment.\")\n",
"\n",
"                    with gr.Row():\n",
"                        exam_answers = gr.Textbox(\n",
"                            label=\"Enter your answers (separate each answer with two line breaks)\",\n",
"                            placeholder=\"Answer 1\\n\\nAnswer 2\\n\\nAnswer 3...\",\n",
"                            lines=15\n",
"                        )\n",
"\n",
"                    submit_exam_btn = gr.Button(\"Submit Exam\", variant=\"primary\")\n",
"\n",
"                    exam_feedback = gr.Markdown(\"Your exam results will appear here.\")\n",
"\n",
"                # Q&A Sandbox tab\n",
"                with gr.Tab(\"Q&A Sandbox\"):\n",
"                    with gr.Row():\n",
"                        question_input = gr.Textbox(\n",
"                            label=\"Ask any question about Python\",\n",
"                            placeholder=\"Enter your question here...\",\n",
"                            lines=3\n",
"                        )\n",
"\n",
"                    ask_btn = gr.Button(\"Ask Question\", variant=\"primary\")\n",
"\n",
"                    answer_display = gr.Markdown(\"Ask a question to get started.\")\n",
"\n",
"                # Progress Report tab\n",
"                with gr.Tab(\"Progress Report\"):\n",
"                    with gr.Row():\n",
"                        report_btn = gr.Button(\"Generate Progress Report\", variant=\"primary\")\n",
"\n",
"                    progress_display = gr.Markdown(\"Click 'Generate Progress Report' to see your learning statistics.\")\n",
"\n",
"        # Callbacks -- each receives the LearningSystem held in gr.State\n",
"        def generate_content(learning_system):\n",
"            \"\"\"Generate and display today's module content.\"\"\"\n",
"            if not learning_system:\n",
"                return \"Please initialize the system first.\"\n",
"            return learning_system.generate_day_content()\n",
"\n",
"        def advance_day(learning_system):\n",
"            \"\"\"Advance the curriculum; returns (status message, day banner).\"\"\"\n",
"            if not learning_system:\n",
"                return \"Please initialize the system first.\", \"## Current Day: 1\"\n",
"            result = learning_system.advance_to_next_day()\n",
"            return result, f\"## Current Day: {learning_system.current_day}\"\n",
"\n",
"        def start_exam(learning_system):\n",
"            \"\"\"Start the current day's exam and show the paper.\"\"\"\n",
"            if not learning_system:\n",
"                return \"Please initialize the system first.\"\n",
"            try:\n",
"                exam_content = learning_system.start_exam()\n",
"                return exam_content\n",
"            except Exception as e:\n",
"                return f\"Error starting exam: {str(e)}\"\n",
"\n",
"        def submit_exam(learning_system, answers):\n",
"            \"\"\"Grade the submitted answers and show the results report.\"\"\"\n",
"            if not learning_system:\n",
"                return \"Please initialize the system first.\"\n",
"            if not answers.strip():\n",
"                return \"Please provide answers before submitting.\"\n",
"\n",
"            try:\n",
"                feedback = learning_system.submit_exam(answers)\n",
"                return feedback\n",
"            except Exception as e:\n",
"                return f\"Error evaluating exam: {str(e)}\"\n",
"\n",
"        def ask_question(learning_system, question):\n",
"            \"\"\"Answer a free-form Q&A Sandbox question.\"\"\"\n",
"            if not learning_system:\n",
"                return \"Please initialize the system first.\"\n",
"            if not question.strip():\n",
"                return \"Please enter a question.\"\n",
"\n",
"            try:\n",
"                answer = learning_system.answer_sandbox_question(question)\n",
"                return answer\n",
"            except Exception as e:\n",
"                return f\"Error processing question: {str(e)}\"\n",
"\n",
"        def generate_progress_report(learning_system):\n",
"            \"\"\"Build and display the Markdown progress report.\"\"\"\n",
"            if not learning_system:\n",
"                return \"Please initialize the system first.\"\n",
"\n",
"            try:\n",
"                report = learning_system.get_learning_progress()\n",
"                return report\n",
"            except Exception as e:\n",
"                return f\"Error generating progress report: {str(e)}\"\n",
"\n",
"        # Set up event handlers\n",
"        init_btn.click(\n",
"            initialize_system,\n",
"            inputs=[api_key_input],\n",
"            outputs=[init_status, main_interface, learning_system_state]\n",
"        )\n",
"\n",
"        generate_content_btn.click(\n",
"            generate_content,\n",
"            inputs=[learning_system_state],\n",
"            outputs=[content_display]\n",
"        )\n",
"\n",
"        next_day_btn.click(\n",
"            advance_day,\n",
"            inputs=[learning_system_state],\n",
"            outputs=[content_display, day_display]\n",
"        )\n",
"\n",
"        start_exam_btn.click(\n",
"            start_exam,\n",
"            inputs=[learning_system_state],\n",
"            outputs=[exam_display]\n",
"        )\n",
"\n",
"        submit_exam_btn.click(\n",
"            submit_exam,\n",
"            inputs=[learning_system_state, exam_answers],\n",
"            outputs=[exam_feedback]\n",
"        )\n",
"\n",
"        ask_btn.click(\n",
"            ask_question,\n",
"            inputs=[learning_system_state, question_input],\n",
"            outputs=[answer_display]\n",
"        )\n",
"\n",
"        report_btn.click(\n",
"            generate_progress_report,\n",
"            inputs=[learning_system_state],\n",
"            outputs=[progress_display]\n",
"        )\n",
"\n",
"    return interface\n",
"\n",
"# Create and launch the interface\n",
"# For Colab, make sure to install gradio first if you haven't\n",
"# !pip install gradio\n",
"interface = create_interface()\n",
"interface.launch(share=True)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 611
},
"id": "XMuEPwonhq5L",
"outputId": "5fcd89e5-4004-4b4e-e7f1-b01b84bcc5fe"
},
"execution_count": 20,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
"* Running on public URL: https://af28b80e4f184f47a8.gradio.live\n",
"\n",
"This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
]
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"<IPython.core.display.HTML object>"
],
"text/html": [
"<div><iframe src=\"https://af28b80e4f184f47a8.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
]
},
"metadata": {}
},
{
"output_type": "execute_result",
"data": {
"text/plain": []
},
"metadata": {},
"execution_count": 20
}
]
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "Chns0fHgNGxU"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "3a6ZeomONG0M"
},
"execution_count": null,
"outputs": []
}
]
}