diff --git "a/SmartSummarizer.ipynb" "b/SmartSummarizer.ipynb" --- "a/SmartSummarizer.ipynb" +++ "b/SmartSummarizer.ipynb" @@ -40,193 +40,142 @@ "name": "stdout", "output_type": "stream", "text": [ - "Requirement already satisfied: transformers in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from -r requirements.txt (line 1)) (4.51.3)\n", - "Collecting torch\n", - " Downloading torch-2.7.0-cp39-cp39-win_amd64.whl (212.4 MB)\n", - " -------------------------------------- 212.4/212.4 MB 1.5 MB/s eta 0:00:00\n", - "Collecting langdetect\n", - " Downloading langdetect-1.0.9.tar.gz (981 kB)\n", - " ------------------------------------- 981.5/981.5 kB 12.5 MB/s eta 0:00:00\n", + "Collecting transformers (from -r requirements.txt (line 1))\n", + " Using cached transformers-4.51.3-py3-none-any.whl.metadata (38 kB)\n", + "Collecting torch (from -r requirements.txt (line 2))\n", + " Using cached torch-2.7.0-cp312-cp312-win_amd64.whl.metadata (29 kB)\n", + "Collecting langdetect (from -r requirements.txt (line 3))\n", + " Using cached langdetect-1.0.9.tar.gz (981 kB)\n", " Preparing metadata (setup.py): started\n", " Preparing metadata (setup.py): finished with status 'done'\n", - "Collecting gradio\n", - " Downloading gradio-4.44.1-py3-none-any.whl (18.1 MB)\n", - " --------------------------------------- 18.1/18.1 MB 13.1 MB/s eta 0:00:00\n", - "Requirement already satisfied: numpy>=1.17 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (1.21.5)\n", - "Requirement already satisfied: safetensors>=0.4.3 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (0.5.3)\n", - "Requirement already satisfied: tqdm>=4.27 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (4.64.1)\n", - "Requirement already satisfied: packaging>=20.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from transformers->-r 
requirements.txt (line 1)) (21.3)\n", - "Requirement already satisfied: filelock in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (3.6.0)\n", - "Requirement already satisfied: requests in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (2.28.1)\n", - "Requirement already satisfied: pyyaml>=5.1 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (6.0)\n", - "Requirement already satisfied: huggingface-hub<1.0,>=0.30.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (0.30.2)\n", - "Requirement already satisfied: tokenizers<0.22,>=0.21 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (0.21.1)\n", - "Requirement already satisfied: regex!=2019.12.17 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (2022.7.9)\n", - "Requirement already satisfied: jinja2 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from torch->-r requirements.txt (line 2)) (2.11.3)\n", - "Requirement already satisfied: networkx in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from torch->-r requirements.txt (line 2)) (2.8.4)\n", - "Requirement already satisfied: fsspec in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from torch->-r requirements.txt (line 2)) (2025.3.2)\n", - "Collecting typing-extensions>=4.10.0\n", - " Downloading typing_extensions-4.13.2-py3-none-any.whl (45 kB)\n", - " ---------------------------------------- 45.8/45.8 kB ? 
eta 0:00:00\n", - "Collecting sympy>=1.13.3\n", - " Downloading sympy-1.14.0-py3-none-any.whl (6.3 MB)\n", - " ---------------------------------------- 6.3/6.3 MB 14.9 MB/s eta 0:00:00\n", - "Requirement already satisfied: six in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from langdetect->-r requirements.txt (line 3)) (1.16.0)\n", - "Collecting tomlkit==0.12.0\n", - " Downloading tomlkit-0.12.0-py3-none-any.whl (37 kB)\n", - "Collecting aiofiles<24.0,>=22.0\n", - " Downloading aiofiles-23.2.1-py3-none-any.whl (15 kB)\n", - "Requirement already satisfied: matplotlib~=3.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (3.5.2)\n", - "Collecting pydantic>=2.0\n", - " Downloading pydantic-2.11.4-py3-none-any.whl (443 kB)\n", - " -------------------------------------- 443.9/443.9 kB 6.9 MB/s eta 0:00:00\n", - "Collecting semantic-version~=2.0\n", - " Downloading semantic_version-2.10.0-py2.py3-none-any.whl (15 kB)\n", - "Requirement already satisfied: anyio<5.0,>=3.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (3.5.0)\n", - "Collecting ffmpy\n", - " Downloading ffmpy-0.5.0-py3-none-any.whl (6.0 kB)\n", - "Requirement already satisfied: pandas<3.0,>=1.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (1.4.4)\n", - "Collecting ruff>=0.2.2\n", - " Downloading ruff-0.11.8-py3-none-win_amd64.whl (11.6 MB)\n", - " --------------------------------------- 11.6/11.6 MB 13.1 MB/s eta 0:00:00\n", - "Collecting typer<1.0,>=0.12\n", - " Downloading typer-0.15.3-py3-none-any.whl (45 kB)\n", - " ---------------------------------------- 45.3/45.3 kB ? 
eta 0:00:00\n", - "Collecting pydub\n", - " Downloading pydub-0.25.1-py2.py3-none-any.whl (32 kB)\n", - "Collecting python-multipart>=0.0.9\n", - " Downloading python_multipart-0.0.20-py3-none-any.whl (24 kB)\n", - "Collecting fastapi<1.0\n", - " Downloading fastapi-0.115.12-py3-none-any.whl (95 kB)\n", - " ---------------------------------------- 95.2/95.2 kB 5.3 MB/s eta 0:00:00\n", - "Requirement already satisfied: pillow<11.0,>=8.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (9.2.0)\n", - "Requirement already satisfied: markupsafe~=2.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (2.0.1)\n", - "Collecting orjson~=3.0\n", - " Downloading orjson-3.10.18-cp39-cp39-win_amd64.whl (134 kB)\n", - " -------------------------------------- 134.5/134.5 kB 4.0 MB/s eta 0:00:00\n", - "Collecting httpx>=0.24.1\n", - " Downloading httpx-0.28.1-py3-none-any.whl (73 kB)\n", - " ---------------------------------------- 73.5/73.5 kB ? eta 0:00:00\n", - "Collecting importlib-resources<7.0,>=1.3\n", - " Downloading importlib_resources-6.5.2-py3-none-any.whl (37 kB)\n", - "Collecting uvicorn>=0.14.0\n", - " Downloading uvicorn-0.34.2-py3-none-any.whl (62 kB)\n", - " ---------------------------------------- 62.5/62.5 kB 3.5 MB/s eta 0:00:00\n", - "Collecting gradio-client==1.3.0\n", - " Downloading gradio_client-1.3.0-py3-none-any.whl (318 kB)\n", - " ------------------------------------- 318.7/318.7 kB 19.3 MB/s eta 0:00:00\n", - "Collecting urllib3~=2.0\n", - " Downloading urllib3-2.4.0-py3-none-any.whl (128 kB)\n", - " ---------------------------------------- 128.7/128.7 kB ? eta 0:00:00\n", - "Collecting websockets<13.0,>=10.0\n", - " Downloading websockets-12.0-cp39-cp39-win_amd64.whl (124 kB)\n", - " ---------------------------------------- 125.0/125.0 kB ? 
eta 0:00:00\n", - "Requirement already satisfied: idna>=2.8 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from anyio<5.0,>=3.0->gradio->-r requirements.txt (line 4)) (3.3)\n", - "Requirement already satisfied: sniffio>=1.1 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from anyio<5.0,>=3.0->gradio->-r requirements.txt (line 4)) (1.2.0)\n", - "Collecting starlette<0.47.0,>=0.40.0\n", - " Downloading starlette-0.46.2-py3-none-any.whl (72 kB)\n", - " ---------------------------------------- 72.0/72.0 kB 3.9 MB/s eta 0:00:00\n", - "Collecting httpcore==1.*\n", - " Downloading httpcore-1.0.9-py3-none-any.whl (78 kB)\n", - " ---------------------------------------- 78.8/78.8 kB ? eta 0:00:00\n", - "Requirement already satisfied: certifi in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from httpx>=0.24.1->gradio->-r requirements.txt (line 4)) (2022.9.14)\n", - "Collecting h11>=0.16\n", - " Downloading h11-0.16.0-py3-none-any.whl (37 kB)\n", - "Requirement already satisfied: zipp>=3.1.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from importlib-resources<7.0,>=1.3->gradio->-r requirements.txt (line 4)) (3.8.0)\n", - "Requirement already satisfied: pyparsing>=2.2.1 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from matplotlib~=3.0->gradio->-r requirements.txt (line 4)) (3.0.9)\n", - "Requirement already satisfied: fonttools>=4.22.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from matplotlib~=3.0->gradio->-r requirements.txt (line 4)) (4.25.0)\n", - "Requirement already satisfied: kiwisolver>=1.0.1 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from matplotlib~=3.0->gradio->-r requirements.txt (line 4)) (1.4.2)\n", - "Requirement already satisfied: python-dateutil>=2.7 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from matplotlib~=3.0->gradio->-r requirements.txt (line 4)) (2.8.2)\n", - "Requirement already satisfied: cycler>=0.10 in c:\\users\\issa 
kabore\\anaconda3\\lib\\site-packages (from matplotlib~=3.0->gradio->-r requirements.txt (line 4)) (0.11.0)\n", - "Requirement already satisfied: pytz>=2020.1 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from pandas<3.0,>=1.0->gradio->-r requirements.txt (line 4)) (2022.1)\n", - "Collecting pydantic-core==2.33.2\n", - " Downloading pydantic_core-2.33.2-cp39-cp39-win_amd64.whl (2.0 MB)\n", - " ---------------------------------------- 2.0/2.0 MB 41.4 MB/s eta 0:00:00\n", - "Collecting annotated-types>=0.6.0\n", - " Downloading annotated_types-0.7.0-py3-none-any.whl (13 kB)\n", - "Collecting typing-inspection>=0.4.0\n", - " Downloading typing_inspection-0.4.0-py3-none-any.whl (14 kB)\n", - "Requirement already satisfied: mpmath<1.4,>=1.1.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from sympy>=1.13.3->torch->-r requirements.txt (line 2)) (1.2.1)\n", - "Requirement already satisfied: colorama in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from tqdm>=4.27->transformers->-r requirements.txt (line 1)) (0.4.5)\n", - "Requirement already satisfied: click>=8.0.0 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from typer<1.0,>=0.12->gradio->-r requirements.txt (line 4)) (8.0.4)\n", - "Collecting shellingham>=1.3.0\n", - " Downloading shellingham-1.5.4-py2.py3-none-any.whl (9.8 kB)\n", - "Collecting rich>=10.11.0\n", - " Downloading rich-14.0.0-py3-none-any.whl (243 kB)\n", - " ------------------------------------- 243.2/243.2 kB 14.6 MB/s eta 0:00:00\n", - "Requirement already satisfied: charset-normalizer<3,>=2 in c:\\users\\issa kabore\\anaconda3\\lib\\site-packages (from requests->transformers->-r requirements.txt (line 1)) (2.0.4)\n", - "Collecting requests\n", - " Downloading requests-2.32.3-py3-none-any.whl (64 kB)\n", - " ---------------------------------------- 64.9/64.9 kB ? 
eta 0:00:00\n", - "Collecting pygments<3.0.0,>=2.13.0\n", - " Downloading pygments-2.19.1-py3-none-any.whl (1.2 MB)\n", - " ---------------------------------------- 1.2/1.2 MB 38.0 MB/s eta 0:00:00\n", - "Collecting markdown-it-py>=2.2.0\n", - " Downloading markdown_it_py-3.0.0-py3-none-any.whl (87 kB)\n", - " ---------------------------------------- 87.5/87.5 kB ? eta 0:00:00\n", - "Collecting anyio<5.0,>=3.0\n", - " Downloading anyio-4.9.0-py3-none-any.whl (100 kB)\n", - " ---------------------------------------- 100.9/100.9 kB ? eta 0:00:00\n", - "Collecting exceptiongroup>=1.0.2\n", - " Downloading exceptiongroup-1.2.2-py3-none-any.whl (16 kB)\n", - "Collecting mdurl~=0.1\n", - " Downloading mdurl-0.1.2-py3-none-any.whl (10.0 kB)\n", + "Collecting gradio (from -r requirements.txt (line 4))\n", + " Using cached gradio-5.29.0-py3-none-any.whl.metadata (16 kB)\n", + "Requirement already satisfied: filelock in c:\\projets\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (3.13.1)\n", + "Collecting huggingface-hub<1.0,>=0.30.0 (from transformers->-r requirements.txt (line 1))\n", + " Using cached huggingface_hub-0.30.2-py3-none-any.whl.metadata (13 kB)\n", + "Requirement already satisfied: numpy>=1.17 in c:\\projets\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (1.26.4)\n", + "Requirement already satisfied: packaging>=20.0 in c:\\projets\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (24.1)\n", + "Requirement already satisfied: pyyaml>=5.1 in c:\\projets\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (6.0.1)\n", + "Requirement already satisfied: regex!=2019.12.17 in c:\\projets\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (2024.9.11)\n", + "Requirement already satisfied: requests in c:\\projets\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (2.32.3)\n", + 
"Collecting tokenizers<0.22,>=0.21 (from transformers->-r requirements.txt (line 1))\n", + " Using cached tokenizers-0.21.1-cp39-abi3-win_amd64.whl.metadata (6.9 kB)\n", + "Collecting safetensors>=0.4.3 (from transformers->-r requirements.txt (line 1))\n", + " Using cached safetensors-0.5.3-cp38-abi3-win_amd64.whl.metadata (3.9 kB)\n", + "Requirement already satisfied: tqdm>=4.27 in c:\\projets\\anaconda3\\lib\\site-packages (from transformers->-r requirements.txt (line 1)) (4.66.5)\n", + "Requirement already satisfied: typing-extensions>=4.10.0 in c:\\projets\\anaconda3\\lib\\site-packages (from torch->-r requirements.txt (line 2)) (4.11.0)\n", + "Collecting sympy>=1.13.3 (from torch->-r requirements.txt (line 2))\n", + " Using cached sympy-1.14.0-py3-none-any.whl.metadata (12 kB)\n", + "Requirement already satisfied: networkx in c:\\projets\\anaconda3\\lib\\site-packages (from torch->-r requirements.txt (line 2)) (3.3)\n", + "Requirement already satisfied: jinja2 in c:\\projets\\anaconda3\\lib\\site-packages (from torch->-r requirements.txt (line 2)) (3.1.4)\n", + "Requirement already satisfied: fsspec in c:\\projets\\anaconda3\\lib\\site-packages (from torch->-r requirements.txt (line 2)) (2024.6.1)\n", + "Requirement already satisfied: setuptools in c:\\projets\\anaconda3\\lib\\site-packages (from torch->-r requirements.txt (line 2)) (75.1.0)\n", + "Requirement already satisfied: six in c:\\projets\\anaconda3\\lib\\site-packages (from langdetect->-r requirements.txt (line 3)) (1.16.0)\n", + "Collecting aiofiles<25.0,>=22.0 (from gradio->-r requirements.txt (line 4))\n", + " Using cached aiofiles-24.1.0-py3-none-any.whl.metadata (10 kB)\n", + "Requirement already satisfied: anyio<5.0,>=3.0 in c:\\projets\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (4.2.0)\n", + "Collecting fastapi<1.0,>=0.115.2 (from gradio->-r requirements.txt (line 4))\n", + " Using cached fastapi-0.115.12-py3-none-any.whl.metadata (27 kB)\n", + "Collecting ffmpy 
(from gradio->-r requirements.txt (line 4))\n", + " Using cached ffmpy-0.5.0-py3-none-any.whl.metadata (3.0 kB)\n", + "Collecting gradio-client==1.10.0 (from gradio->-r requirements.txt (line 4))\n", + " Using cached gradio_client-1.10.0-py3-none-any.whl.metadata (7.1 kB)\n", + "Collecting groovy~=0.1 (from gradio->-r requirements.txt (line 4))\n", + " Using cached groovy-0.1.2-py3-none-any.whl.metadata (6.1 kB)\n", + "Requirement already satisfied: httpx>=0.24.1 in c:\\projets\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (0.27.0)\n", + "Requirement already satisfied: markupsafe<4.0,>=2.0 in c:\\projets\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (2.1.3)\n", + "Collecting orjson~=3.0 (from gradio->-r requirements.txt (line 4))\n", + " Using cached orjson-3.10.18-cp312-cp312-win_amd64.whl.metadata (43 kB)\n", + "Requirement already satisfied: pandas<3.0,>=1.0 in c:\\projets\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (2.2.2)\n", + "Requirement already satisfied: pillow<12.0,>=8.0 in c:\\projets\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (10.4.0)\n", + "Requirement already satisfied: pydantic<2.12,>=2.0 in c:\\projets\\anaconda3\\lib\\site-packages (from gradio->-r requirements.txt (line 4)) (2.8.2)\n", + "Collecting pydub (from gradio->-r requirements.txt (line 4))\n", + " Using cached pydub-0.25.1-py2.py3-none-any.whl.metadata (1.4 kB)\n", + "Collecting python-multipart>=0.0.18 (from gradio->-r requirements.txt (line 4))\n", + " Using cached python_multipart-0.0.20-py3-none-any.whl.metadata (1.8 kB)\n", + "Collecting ruff>=0.9.3 (from gradio->-r requirements.txt (line 4))\n", + " Using cached ruff-0.11.8-py3-none-win_amd64.whl.metadata (26 kB)\n", + "Collecting safehttpx<0.2.0,>=0.1.6 (from gradio->-r requirements.txt (line 4))\n", + " Using cached safehttpx-0.1.6-py3-none-any.whl.metadata (4.2 kB)\n", + "Collecting semantic-version~=2.0 (from 
gradio->-r requirements.txt (line 4))\n", + " Using cached semantic_version-2.10.0-py2.py3-none-any.whl.metadata (9.7 kB)\n", + "Collecting starlette<1.0,>=0.40.0 (from gradio->-r requirements.txt (line 4))\n", + " Using cached starlette-0.46.2-py3-none-any.whl.metadata (6.2 kB)\n", + "Collecting tomlkit<0.14.0,>=0.12.0 (from gradio->-r requirements.txt (line 4))\n", + " Using cached tomlkit-0.13.2-py3-none-any.whl.metadata (2.7 kB)\n", + "Collecting typer<1.0,>=0.12 (from gradio->-r requirements.txt (line 4))\n", + " Using cached typer-0.15.3-py3-none-any.whl.metadata (15 kB)\n", + "Collecting uvicorn>=0.14.0 (from gradio->-r requirements.txt (line 4))\n", + " Using cached uvicorn-0.34.2-py3-none-any.whl.metadata (6.5 kB)\n", + "Collecting websockets<16.0,>=10.0 (from gradio-client==1.10.0->gradio->-r requirements.txt (line 4))\n", + " Using cached websockets-15.0.1-cp312-cp312-win_amd64.whl.metadata (7.0 kB)\n", + "Requirement already satisfied: idna>=2.8 in c:\\projets\\anaconda3\\lib\\site-packages (from anyio<5.0,>=3.0->gradio->-r requirements.txt (line 4)) (3.7)\n", + "Requirement already satisfied: sniffio>=1.1 in c:\\projets\\anaconda3\\lib\\site-packages (from anyio<5.0,>=3.0->gradio->-r requirements.txt (line 4)) (1.3.0)\n", + "Requirement already satisfied: certifi in c:\\projets\\anaconda3\\lib\\site-packages (from httpx>=0.24.1->gradio->-r requirements.txt (line 4)) (2024.8.30)\n", + "Requirement already satisfied: httpcore==1.* in c:\\projets\\anaconda3\\lib\\site-packages (from httpx>=0.24.1->gradio->-r requirements.txt (line 4)) (1.0.2)\n", + "Requirement already satisfied: h11<0.15,>=0.13 in c:\\projets\\anaconda3\\lib\\site-packages (from httpcore==1.*->httpx>=0.24.1->gradio->-r requirements.txt (line 4)) (0.14.0)\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in c:\\projets\\anaconda3\\lib\\site-packages (from pandas<3.0,>=1.0->gradio->-r requirements.txt (line 4)) (2.9.0.post0)\n", + "Requirement already satisfied: pytz>=2020.1 in 
c:\\projets\\anaconda3\\lib\\site-packages (from pandas<3.0,>=1.0->gradio->-r requirements.txt (line 4)) (2024.1)\n", + "Requirement already satisfied: tzdata>=2022.7 in c:\\projets\\anaconda3\\lib\\site-packages (from pandas<3.0,>=1.0->gradio->-r requirements.txt (line 4)) (2023.3)\n", + "Requirement already satisfied: annotated-types>=0.4.0 in c:\\projets\\anaconda3\\lib\\site-packages (from pydantic<2.12,>=2.0->gradio->-r requirements.txt (line 4)) (0.6.0)\n", + "Requirement already satisfied: pydantic-core==2.20.1 in c:\\projets\\anaconda3\\lib\\site-packages (from pydantic<2.12,>=2.0->gradio->-r requirements.txt (line 4)) (2.20.1)\n", + "Requirement already satisfied: mpmath<1.4,>=1.1.0 in c:\\projets\\anaconda3\\lib\\site-packages (from sympy>=1.13.3->torch->-r requirements.txt (line 2)) (1.3.0)\n", + "Requirement already satisfied: colorama in c:\\projets\\anaconda3\\lib\\site-packages (from tqdm>=4.27->transformers->-r requirements.txt (line 1)) (0.4.6)\n", + "Requirement already satisfied: click>=8.0.0 in c:\\projets\\anaconda3\\lib\\site-packages (from typer<1.0,>=0.12->gradio->-r requirements.txt (line 4)) (8.1.7)\n", + "Collecting shellingham>=1.3.0 (from typer<1.0,>=0.12->gradio->-r requirements.txt (line 4))\n", + " Using cached shellingham-1.5.4-py2.py3-none-any.whl.metadata (3.5 kB)\n", + "Requirement already satisfied: rich>=10.11.0 in c:\\projets\\anaconda3\\lib\\site-packages (from typer<1.0,>=0.12->gradio->-r requirements.txt (line 4)) (13.7.1)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\projets\\anaconda3\\lib\\site-packages (from requests->transformers->-r requirements.txt (line 1)) (3.3.2)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\projets\\anaconda3\\lib\\site-packages (from requests->transformers->-r requirements.txt (line 1)) (2.2.3)\n", + "Requirement already satisfied: markdown-it-py>=2.2.0 in c:\\projets\\anaconda3\\lib\\site-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r 
requirements.txt (line 4)) (2.2.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in c:\\projets\\anaconda3\\lib\\site-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio->-r requirements.txt (line 4)) (2.15.1)\n", + "Requirement already satisfied: mdurl~=0.1 in c:\\projets\\anaconda3\\lib\\site-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio->-r requirements.txt (line 4)) (0.1.0)\n", + "Using cached transformers-4.51.3-py3-none-any.whl (10.4 MB)\n", + "Using cached torch-2.7.0-cp312-cp312-win_amd64.whl (212.5 MB)\n", + "Using cached gradio-5.29.0-py3-none-any.whl (54.1 MB)\n", + "Using cached gradio_client-1.10.0-py3-none-any.whl (322 kB)\n", + "Using cached aiofiles-24.1.0-py3-none-any.whl (15 kB)\n", + "Using cached fastapi-0.115.12-py3-none-any.whl (95 kB)\n", + "Using cached groovy-0.1.2-py3-none-any.whl (14 kB)\n", + "Using cached huggingface_hub-0.30.2-py3-none-any.whl (481 kB)\n", + "Using cached orjson-3.10.18-cp312-cp312-win_amd64.whl (134 kB)\n", + "Using cached python_multipart-0.0.20-py3-none-any.whl (24 kB)\n", + "Using cached ruff-0.11.8-py3-none-win_amd64.whl (11.6 MB)\n", + "Using cached safehttpx-0.1.6-py3-none-any.whl (8.7 kB)\n", + "Using cached safetensors-0.5.3-cp38-abi3-win_amd64.whl (308 kB)\n", + "Using cached semantic_version-2.10.0-py2.py3-none-any.whl (15 kB)\n", + "Using cached starlette-0.46.2-py3-none-any.whl (72 kB)\n", + "Using cached sympy-1.14.0-py3-none-any.whl (6.3 MB)\n", + "Using cached tokenizers-0.21.1-cp39-abi3-win_amd64.whl (2.4 MB)\n", + "Using cached tomlkit-0.13.2-py3-none-any.whl (37 kB)\n", + "Using cached typer-0.15.3-py3-none-any.whl (45 kB)\n", + "Using cached uvicorn-0.34.2-py3-none-any.whl (62 kB)\n", + "Using cached ffmpy-0.5.0-py3-none-any.whl (6.0 kB)\n", + "Using cached pydub-0.25.1-py2.py3-none-any.whl (32 kB)\n", + "Using cached shellingham-1.5.4-py2.py3-none-any.whl (9.8 kB)\n", + "Using cached websockets-15.0.1-cp312-cp312-win_amd64.whl (176 kB)\n", 
"Building wheels for collected packages: langdetect\n", " Building wheel for langdetect (setup.py): started\n", " Building wheel for langdetect (setup.py): finished with status 'done'\n", - " Created wheel for langdetect: filename=langdetect-1.0.9-py3-none-any.whl size=993225 sha256=b37ed7c002d96ce87bc295fafe28e5a92854509e09bf0d18175aae952cdcd3ea\n", - " Stored in directory: c:\\users\\issa kabore\\appdata\\local\\pip\\cache\\wheels\\d1\\c1\\d9\\7e068de779d863bc8f8fc9467d85e25cfe47fa5051fff1a1bb\n", + " Created wheel for langdetect: filename=langdetect-1.0.9-py3-none-any.whl size=993251 sha256=3a74895055c4617e0c441fdc81d151bb639b480a0b88b0bb4003677da1e40c3a\n", + " Stored in directory: c:\\users\\issa kabore\\appdata\\local\\pip\\cache\\wheels\\c1\\67\\88\\e844b5b022812e15a52e4eaa38a1e709e99f06f6639d7e3ba7\n", "Successfully built langdetect\n", - "Installing collected packages: pydub, websockets, urllib3, typing-extensions, tomlkit, sympy, shellingham, semantic-version, ruff, python-multipart, pygments, orjson, mdurl, langdetect, importlib-resources, h11, ffmpy, exceptiongroup, annotated-types, aiofiles, uvicorn, typing-inspection, torch, requests, pydantic-core, markdown-it-py, httpcore, anyio, starlette, rich, pydantic, httpx, typer, gradio-client, fastapi, gradio\n", - " Attempting uninstall: urllib3\n", - " Found existing installation: urllib3 1.26.11\n", - " Uninstalling urllib3-1.26.11:\n", - " Successfully uninstalled urllib3-1.26.11\n", - " Attempting uninstall: typing-extensions\n", - " Found existing installation: typing_extensions 4.3.0\n", - " Uninstalling typing_extensions-4.3.0:\n", - " Successfully uninstalled typing_extensions-4.3.0\n", + "Installing collected packages: pydub, websockets, tomlkit, sympy, shellingham, semantic-version, safetensors, ruff, python-multipart, orjson, langdetect, groovy, ffmpy, aiofiles, uvicorn, torch, starlette, huggingface-hub, typer, tokenizers, safehttpx, gradio-client, fastapi, transformers, gradio\n", " Attempting 
uninstall: tomlkit\n", " Found existing installation: tomlkit 0.11.1\n", " Uninstalling tomlkit-0.11.1:\n", " Successfully uninstalled tomlkit-0.11.1\n", " Attempting uninstall: sympy\n", - " Found existing installation: sympy 1.10.1\n", - " Uninstalling sympy-1.10.1:\n", - " Successfully uninstalled sympy-1.10.1\n", - " Attempting uninstall: pygments\n", - " Found existing installation: Pygments 2.11.2\n", - " Uninstalling Pygments-2.11.2:\n", - " Successfully uninstalled Pygments-2.11.2\n", - " Attempting uninstall: requests\n", - " Found existing installation: requests 2.28.1\n", - " Uninstalling requests-2.28.1:\n", - " Successfully uninstalled requests-2.28.1\n", - " Attempting uninstall: anyio\n", - " Found existing installation: anyio 3.5.0\n", - " Uninstalling anyio-3.5.0:\n", - " Successfully uninstalled anyio-3.5.0\n", - "Successfully installed aiofiles-23.2.1 annotated-types-0.7.0 anyio-4.9.0 exceptiongroup-1.2.2 fastapi-0.115.12 ffmpy-0.5.0 gradio-4.44.1 gradio-client-1.3.0 h11-0.16.0 httpcore-1.0.9 httpx-0.28.1 importlib-resources-6.5.2 langdetect-1.0.9 markdown-it-py-3.0.0 mdurl-0.1.2 orjson-3.10.18 pydantic-2.11.4 pydantic-core-2.33.2 pydub-0.25.1 pygments-2.19.1 python-multipart-0.0.20 requests-2.32.3 rich-14.0.0 ruff-0.11.8 semantic-version-2.10.0 shellingham-1.5.4 starlette-0.46.2 sympy-1.14.0 tomlkit-0.12.0 torch-2.7.0 typer-0.15.3 typing-extensions-4.13.2 typing-inspection-0.4.0 urllib3-2.4.0 uvicorn-0.34.2 websockets-12.0\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\n", - "spyder 5.2.2 requires pyqt5<5.13, which is not installed.\n", - "spyder 5.2.2 requires pyqtwebengine<5.13, which is not installed.\n", - "jupyter-server 1.18.1 requires anyio<4,>=3.1.0, but you have anyio 4.9.0 which is incompatible.\n", - "conda-repo-cli 1.0.24 requires clyent==1.2.1, but you have clyent 1.2.2 which is incompatible.\n", - "conda-repo-cli 1.0.24 requires nbformat==5.4.0, but you have nbformat 5.5.0 which is incompatible.\n", - "conda-repo-cli 1.0.24 requires requests==2.28.1, but you have requests 2.32.3 which is incompatible.\n", - "botocore 1.27.28 requires urllib3<1.27,>=1.25.4, but you have urllib3 2.4.0 which is incompatible.\n" + " Found existing installation: sympy 1.13.2\n", + " Uninstalling sympy-1.13.2:\n", + " Successfully uninstalled sympy-1.13.2\n", + "Successfully installed aiofiles-24.1.0 fastapi-0.115.12 ffmpy-0.5.0 gradio-5.29.0 gradio-client-1.10.0 groovy-0.1.2 huggingface-hub-0.30.2 langdetect-1.0.9 orjson-3.10.18 pydub-0.25.1 python-multipart-0.0.20 ruff-0.11.8 safehttpx-0.1.6 safetensors-0.5.3 semantic-version-2.10.0 shellingham-1.5.4 starlette-0.46.2 sympy-1.14.0 tokenizers-0.21.1 tomlkit-0.13.2 torch-2.7.0 transformers-4.51.3 typer-0.15.3 uvicorn-0.34.2 websockets-15.0.1\n" ] } ], "source": [ "# !pip install transformers sentencepiece\n", - "!pip install -r requirements.txt" + "! 
pip install -r requirements.txt" ] }, { @@ -238,20 +187,29343 @@ "source": [ "# import loguru\n", "\n", - "from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM\n", + "from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, LlamaTokenizer\n", "import textwrap # Text wrapping and filling\n", "\n", "import gradio as gr\n", - "from langdetect import detect" + "from langdetect import detect\n", + "import sys" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "80f389cd", "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3.12.7 | packaged by Anaconda, Inc. | (main, Oct 4 2024, 13:17:27) [MSC v.1929 64 bit (AMD64)]\n" + ] + } + ], + "source": [ + "print(sys.version)" + ] + }, + { + "cell_type": "markdown", + "id": "02f7fe5c", + "metadata": {}, + "source": [ + "## 🧠 Model Descriptions\n", + "\n", + "### 🇬🇧 `facebook/bart-large-cnn` — English Text Summarization\n", + "\n", + "**BART (Bidirectional and Auto-Regressive Transformer)** is a model developed by Facebook AI that combines the strengths of **encoder-decoder** architectures (like T5) and **auto-regressive** models (like GPT). It is **fine-tuned** on the **CNN/DailyMail dataset**, consisting of articles and summaries.\n", + "\n", + "- **Use Case**: Excellent for **journalistic**, **informal**, or **structured opinion texts**.\n", + "- **Type of Summary**: **Abstractive** (paraphrasing, not just extraction).\n", + "\n", + "**Architecture**:\n", + "- 12 layers of encoder + 12 layers of decoder\n", + "- Bidirectional attention for encoding, causal attention for decoding\n", + "- Around **406M parameters**\n", + "\n", + "---\n", + "\n", + "### 🇫🇷 `plguillou/t5-base-fr-sum-cnndm` — French Text Summarization\n", + "\n", + "Based on **T5 (Text-to-Text Transfer Transformer)**, developed by Google. 
This model is **fine-tuned** for **French text summarization** on a dataset inspired by CNN/DailyMail.\n", + "\n", + "- **Use Case**: Best for **formal** or **structured** texts: **news articles**, **reports**, or **official documents**.\n", + "- **Type of Summary**: **Abstractive** (rephrasing the input text in its own words).\n", + "\n", + "**Architecture**:\n", + "- **T5-base**: Around **220M parameters**\n", + "- Multilingual, but fine-tuned specifically for **French**.\n", + "\n", + "---\n", + "\n", + "### 🌍 `facebook/mbart-large-50-one-to-many-mmt` — Multilingual Text Summarization\n", + "\n", + "**mBART (Multilingual BART)** is a variation of the BART model that is trained on **multiple languages**. It is designed for **translation** tasks but can also be adapted for **summarization**.\n", + "\n", + "- **Use Case**: Suitable for summarizing text in multiple languages, making it a versatile tool for multilingual applications.\n", + "- **Type of Summary**: **Abstractive**.\n", + "\n", + "**Architecture**:\n", + "- 12 layers of encoder + 12 layers of decoder\n", + "- Multilingual model trained on 50 languages\n", + "- Around **680M parameters**\n", + "\n", + "---\n", + "\n", + "### 🔄 `google/t5-base-xxl-tlm` — T5 for Multilingual Tasks\n", + "\n", + "**T5** (Text-to-Text Transfer Transformer) is a model that frames all NLP tasks as a text-to-text problem, making it highly adaptable. 
It has been fine-tuned for multiple tasks including **summarization**.\n", + "\n", + "- **Use Case**: Works well for **multilingual summarization**, but can also be used for translation, question-answering, etc.\n", + "- **Type of Summary**: **Abstractive** (like all T5-based models).\n", + "\n", + "**Architecture**:\n", + "- **T5-base**: Around **220M parameters**\n", + "- **T5-XXL**: Much larger, up to **11B parameters**\n", + "- Fine-tuned for many multilingual tasks\n", + "\n", + "---\n", + "\n", + "### 🚀 `google/flan-t5-xl` — Fine-tuned T5 for Better Generalization\n", + "\n", + "**FLAN-T5** is a version of T5 that is **fine-tuned on a variety of tasks** to improve generalization. It aims to perform better on a wide range of NLP tasks, including summarization, when compared to regular T5.\n", + "\n", + "- **Use Case**: Ideal for **high-quality summarization** tasks in multiple languages, with improved robustness.\n", + "- **Type of Summary**: **Abstractive**.\n", + "\n", + "**Architecture**:\n", + "- **T5-XL**: Large model with **11B parameters**.\n", + "- Fine-tuned on a wide variety of tasks, improving the model's ability to generalize across domains.\n", + "\n", + "---\n", + "\n", + "### 📊 Quick Comparison\n", + "\n", + "| Model | Language | Architecture | Fine-Tuning | Type of Summary |\n", + "|-------------------------------|---------------|---------------------|------------------------|-----------------|\n", + "| `facebook/bart-large-cnn` | English | BART | CNN/DailyMail | Abstractive |\n", + "| `plguillou/t5-base-fr-sum` | French | T5 (Base) | CNN/DailyMail FR | Abstractive |\n", + "| `facebook/mbart-large-50` | Multilingual | mBART | Multilingual (50 languages) | Abstractive |\n", + "| `google/t5-base-xxl-tlm` | Multilingual | T5 (Base or XXL) | Multilingual | Abstractive |\n", + "| `google/flan-t5-xl` | Multilingual | T5 (Fine-tuned) | Fine-tuned for better generalization | Abstractive |\n" + ] + }, + { + "cell_type": "markdown", + "id": "f742eff8", + 
"metadata": {}, + "source": [ + "## Choice models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f4aa015", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Device set to use cpu\n", + "Device set to use cpu\n" + ] + } + ], + "source": [ + "# English summarization model (BART)\n", + "summarizer_en = pipeline(\"summarization\", model=\"facebook/bart-large-cnn\")\n", + "\n", + "# French summarization model (T5 fine-tuned for summarization)\n", + "summarizer_fr = pipeline(\"summarization\", model=\"plguillou/t5-base-fr-sum-cnndm\")\n", + "\n", + "# fr_model_name = \"plguillou/t5-base-fr-sum-cnndm\"\n", + "# tokenizer_fr = AutoTokenizer.from_pretrained(fr_model_name)\n", + "# model_fr = AutoModelForSeq2SeqLM.from_pretrained(fr_model_name)\n", + "# summarizer_fr = pipeline(\"summarization\", model=model_fr, tokenizer=tokenizer_fr)" + ] + }, + { + "cell_type": "markdown", + "id": "3446c46e", + "metadata": {}, + "source": [ + "## 🧪 Application: Testing models" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "e0f04271", + "metadata": {}, + "outputs": [], + "source": [ + "text_en = \"\"\"\n", + "Artificial Intelligence is revolutionizing many industries such as healthcare, finance, and transportation.\n", + "Machine learning techniques now enable systems to analyze vast amounts of data and make decisions with minimal human input.\n", + "However, these advances raise concerns over data privacy, algorithmic transparency, and job displacement.\n", + "\"\"\"\n", + "\n", + "text_fr = \"\"\"\n", + "L'intelligence artificielle transforme profondément des secteurs comme la santé, les transports et l'éducation.\n", + "Grâce à l'apprentissage automatique, les systèmes peuvent analyser de grandes quantités de données et prendre des décisions complexes.\n", + "Cependant, cela soulève des enjeux éthiques majeurs sur la transparence, l'emploi et la confidentialité.\n", + "\"\"\"" + ] + }, + { 
+ "cell_type": "code", + "execution_count": 10, + "id": "beb7430f", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Your max_length is set to 100, but your input_length is only 62. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=31)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🔹 Original English Text:\n", + "\n", + " Artificial Intelligence is revolutionizing many industries such as healthcare, finance, and\n", + "transportation. Machine learning techniques now enable systems to analyze vast amounts of data and\n", + "make decisions with minimal human input. However, these advances raise concerns over data privacy,\n", + "algorithmic transparency, and job displacement.\n", + "\n", + "✅ English Summary:\n", + "\n", + "Machine learning techniques now enable systems to analyze vast amounts of data and make decisions\n", + "with minimal human input. These advances raise concerns over data privacy, algorithmic transparency,\n", + "and job displacement.\n" + ] + } + ], + "source": [ + "print(\"🔹 Original English Text:\\n\")\n", + "print(textwrap.fill(text_en, width=100))\n", + "\n", + "summary_en = summarizer_en(text_en, max_length=100, min_length=30, do_sample=False)\n", + "\n", + "print(\"\\n✅ English Summary:\\n\")\n", + "print(textwrap.fill(summary_en[0][\"summary_text\"], width=100))" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "df28cdbb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🔹 Texte original en français:\n", + "\n", + " L'intelligence artificielle transforme profondément des secteurs comme la santé, les transports et\n", + "l'éducation. 
Grâce à l'apprentissage automatique, les systèmes peuvent analyser de grandes quantités\n", + "de données et prendre des décisions complexes. Cependant, cela soulève des enjeux éthiques majeurs\n", + "sur la transparence, l'emploi et la confidentialité.\n", + "\n", + "✅ Résumé en français:\n", + "\n", + "L'intelligence artificielle transforme profondément des secteurs comme la santé, les transports et\n", + "l'éducation. Cependant, cela soulève des enjeux éthiques majeurs sur la transparence.\n" + ] + } + ], + "source": [ + "print(\"🔹 Texte original en français:\\n\")\n", + "print(textwrap.fill(text_fr, width=100))\n", + "\n", + "summary_fr = summarizer_fr(text_fr, max_length=100, min_length=30, do_sample=False)\n", + "\n", + "print(\"\\n✅ Résumé en français:\\n\")\n", + "print(textwrap.fill(summary_fr[0][\"summary_text\"], width=100))" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "4a66ff17", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🔹 Texte original en français:\n", + "\n", + " L'intelligence artificielle transforme profondément des secteurs comme la santé, les transports et\n", + "l'éducation. Grâce à l'apprentissage automatique, les systèmes peuvent analyser de grandes quantités\n", + "de données et prendre des décisions complexes. Cependant, cela soulève des enjeux éthiques majeurs\n", + "sur la transparence, l'emploi et la confidentialité.\n", + "\n", + "✅ Résumé en français:\n", + "\n", + "L'intelligence artificielle transforme profondément des secteurs tels que la santé, les transports\n", + "et l'éducation. 
Cependant, cela soulève des enjeux éthiques majeurs sur la transparence.\n" + ] + } + ], + "source": [ + "print(\"🔹 Texte original en français:\\n\")\n", + "print(textwrap.fill(text_fr, width=100))\n", + "\n", + "summary_fr = summarizer_fr(text_fr, max_length=100, min_length=30, do_sample=True)\n", + "\n", + "print(\"\\n✅ Résumé en français:\\n\")\n", + "print(textwrap.fill(summary_fr[0][\"summary_text\"], width=100))" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "9fee67fe", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Your max_length is set to 100, but your input_length is only 3. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=1)\n" + ] + }, + { + "data": { + "text/plain": [ + "[{'summary_text': \"Selon les autorités, il s'agit d'un événement qui n'a pas eu lieu à l'époque.\"}]" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "empty_text = \"\"\n", + "\n", + "summary_fr = summarizer_fr(empty_text, max_length=100, min_length=30, do_sample=False)\n", + "summary_fr" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "a1d65a17", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Your max_length is set to 60, but your input_length is only 39. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=19)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🔁 Résumés français (batch):\n", + "\n", + "⚠️ Input text is empty. 
Please provide valid content to summarize.\n", + "⚠️ Input text is too short to summarize meaningfully.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Your max_length is set to 60, but your input_length is only 24. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=12)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "📌 Texte: Machine learning techniques now enable systems to analyze vast amounts of data and make decisions\n", + " with minimal human input. These advances raise concerns over data privacy, algorithmic transparency,\n", + " and job displacement.\n", + "➡️ Résumé: Les nouvelles techniques de machine-learning permettent aux systèmes d'analyser de vastes quantités de données. Les avancées entraînent des problèmes de protection de la vie privée, de transparence et de licenciement.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Your max_length is set to 60, but your input_length is only 32. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=16)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "📌 Texte: Le réchauffement climatique provoque des événements météorologiques extrêmes dans le monde entier.\n", + "➡️ Résumé: Le réchauffement climatique provoque des événements météorologiques extrêmes dans le monde entier.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Your max_length is set to 60, but your input_length is only 33. Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. 
summarizer('...', max_length=16)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "📌 Texte: La France accueille chaque année des millions de touristes attirés par sa culture et sa gastronomie.\n", + "➡️ Résumé: La France accueille chaque année des millions de touristes attirés par la culture et la gastronomie.\n", + "\n", + "📌 Texte: Les véhicules autonomes utilisent des capteurs et de l'IA pour se déplacer sans conducteur humain.\n", + "➡️ Résumé: Les véhicules autonomes utilisent des capteurs et de l'IA pour se déplacer sans conducteur humain.\n", + "\n" + ] + } + ], + "source": [ + "texts_fr = [\n", + " \"\",\n", + " \"\"\"Machine learning techniques now enable systems to analyze vast amounts of data and make decisions\n", + " with minimal human input. These advances raise concerns over data privacy, algorithmic transparency,\n", + " and job displacement.\"\"\",\n", + " \"Le réchauffement climatique provoque des événements météorologiques extrêmes dans le monde entier.\",\n", + " \"La France accueille chaque année des millions de touristes attirés par sa culture et sa gastronomie.\",\n", + " \"Les véhicules autonomes utilisent des capteurs et de l'IA pour se déplacer sans conducteur humain.\"\n", + "]\n", + "\n", + "print(\"🔁 Résumés français (batch):\\n\")\n", + "for t in texts_fr:\n", + " text = t.strip()\n", + " if not text:\n", + " print(\"⚠️ Input text is empty. 
Please provide valid content to summarize.\")\n", + "\n", + "    elif len(text.split()) < 5:\n", + "        print(\"⚠️ Input text is too short to summarize meaningfully.\")\n", + "    \n", + "    else:\n", + "        s = summarizer_fr(t, max_length=60, min_length=20, do_sample=True)\n", + "        print(f\"📌 Texte: {t}\\n➡️ Résumé: {s[0]['summary_text']}\\n\")\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "2c4fa751", + "metadata": {}, + "source": [ + " ## 🔍 Auto detect text language" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "751a2778", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "📝 Text: Bonjour, comment allez-vous ?\n", + "➡️ Language detected: fr\n", + "\n", + "📝 Text: Hello, how are you doing?\n", + "➡️ Language detected: en\n", + "\n", + "📝 Text: Hola, ¿cómo estás?\n", + "➡️ Language detected: es\n", + "\n", + "📝 Text: Guten Tag, wie geht's Ihnen?\n", + "➡️ Language detected: de\n", + "\n", + "📝 Text: \n", + "❌ Could not detect language\n", + "\n", + "📝 Text: こんにちは、お元気ですか?\n", + "➡️ Language detected: ja\n", + "\n", + "📝 Text: 1234567890 $$$ ???\n", + "❌ Could not detect language\n", + "\n" + ] + } + ], + "source": [ + "\n", + "texts = [\n", + "    \"Bonjour, comment allez-vous ?\",   # French\n", + "    \"Hello, how are you doing?\",       # English\n", + "    \"Hola, ¿cómo estás?\",             # Spanish\n", + "    \"Guten Tag, wie geht's Ihnen?\",    # German\n", + "    \"\",                                # Empty\n", + "    \"こんにちは、お元気ですか?\",          # Japanese\n", + "    \"1234567890 $$$ ???\",              # Gibberish\n", + "]\n", + "\n", + "for text in texts:\n", + "    try:\n", + "        lang = detect(text)\n", + "        print(f\"📝 Text: {text}\\n➡️ Language detected: {lang}\\n\")\n", + "    except Exception:\n", + "        print(f\"📝 Text: {text}\\n❌ Could not detect language\\n\")" + ] + }, + { + "cell_type": "markdown", + "id": "8c85c7b9", + "metadata": {}, + "source": [ + "## Applications: Scripts" + ] + }, + { + "cell_type": "markdown", + "id": "fcfea0de", + "metadata": {}, + "source": [ + "### Summarizer" + ] + }, 
+ { + "cell_type": "code", + "execution_count": null, + "id": "6a65e77a", + "metadata": {}, "outputs": [], - "source": [] + "source": [ + "from summarizer.utils import detect_language, read_file\n", + "from summarizer.summarize import generate_summary\n", + "\n", + "import PyPDF2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2fd93d44", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "📝 Text: Bonjour, comment allez-vous ?\n", + "➡️ Language detected: fr\n", + "\n", + "📝 Text: Hello, how are you doing?\n", + "➡️ Language detected: en\n", + "\n", + "📝 Text: Hola, ¿cómo estás?\n", + "➡️ Language detected: es\n", + "\n", + "📝 Text: Guten Tag, wie geht's Ihnen?\n", + "➡️ Language detected: de\n", + "\n", + "📝 Text: \n", + "➡️ Language detected: unknown\n", + "\n", + "📝 Text: こんにちは、お元気ですか?\n", + "➡️ Language detected: ja\n", + "\n", + "📝 Text: 1234567890 $$$ ???\n", + "➡️ Language detected: unknown\n", + "\n" + ] + } + ], + "source": [ + "\n", + "texts = [\n", + " \"Bonjour, comment allez-vous ?\", # French\n", + " \"Hello, how are you doing?\", # English\n", + " \"Hola, ¿cómo estás?\", # Spanish\n", + " \"Guten Tag, wie geht's Ihnen?\", # German\n", + " \"\", # Empty\n", + " \"こんにちは、お元気ですか?\", # Japanese\n", + " \"1234567890 $$$ ???\", # Gibberish\n", + "]\n", + "\n", + "for text in texts:\n", + " try:\n", + " lang = detect_language(text)\n", + " print(f\"📝 Text: {text}\\n➡️ Language detected: {lang}\\n\")\n", + " except:\n", + " print(f\"📝 Text: {text}\\n❌ Could not detect language\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "id": "965911ac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original French Text:\n", + " Le changement climatique est une menace majeure pour la planète.\n", + "Les experts estiment que si les émissions de gaz à effet de serre ne sont pas réduites de manière significative,\n", + " les 
températures mondiales continueront d'augmenter, provoquant des phénomènes météorologiques extrêmes, des inondations,\n", + " des sécheresses prolongées et la montée du niveau des mers.\n", + " Pour y faire face, il est nécessaire de transformer nos modes de production et de consommation, de développer les énergies renouvelables,\n", + " et de mettre en œuvre des politiques publiques ambitieuses.\n", + "\n" + ] + } + ], + "source": [ + "def read_txt_file(filepath: str) -> str:\n", + " \"\"\"Read content from a .txt file. \"\"\"\n", + " try:\n", + " with open(filepath, \"r\", encoding=\"utf-8\") as f:\n", + " content = f.read()\n", + " return content\n", + " except Exception as e:\n", + " print(f\"❌ Error reading TXT file: {e}\")\n", + " return \"\"\n", + "\n", + "text = read_txt_file(\"assets/sample_fr.txt\")\n", + "print(f\"Original French Text:\\n {text}\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "1dc60378", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Résumé FR:\n", + " Les experts estiment que si les émissions de gaz à effet de serre ne sont pas réduites de façon\n", + "significative, les températures mondiales continueront d'augmenter. Il est nécessaire de transformer\n", + "nos modes de production et de consommation, de développer les énergies renouvelables, d’adopter des\n", + "politiques publiques ambitieuses.\n" + ] + } + ], + "source": [ + "\n", + "summary = generate_summary(text=text)\n", + "print(f\"Résumé FR:\\n {textwrap.fill(summary, width=100)}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "id": "63b78254", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original EN Text:\n", + " Climate change is undeniably one of the most significant global challenges of our time. 
Its effects\n", + "are being felt across the globe, from rising sea levels to more frequent and intense natural disasters.\n", + "Governments, businesses, and individuals must take immediate and sustained action to reduce\n", + "greenhouse gas emissions. Investing in renewable energy, promoting sustainable transportation,\n", + "and encouraging conservation are essential steps. The science is clear: if we do not act now, the\n", + "consequences will be irreversible and catastrophic.\n" + ] + } + ], + "source": [ + "def read_pdf_file(filepath: str) -> str:\n", + " try:\n", + " with open(filepath, \"rb\") as file:\n", + " pdf_reader = PyPDF2.PdfReader(file)\n", + " text = \"\"\n", + " \n", + " # Loop through each page and extract text\n", + " for page_num in range(len(pdf_reader.pages)):\n", + " page = pdf_reader.pages[page_num]\n", + " text += page.extract_text()\n", + " \n", + " return text.strip()\n", + " \n", + " except Exception as e:\n", + " print(f\"❌ Error reading PDF file: {e}\")\n", + " return \"\"\n", + " \n", + "\n", + "text = read_pdf_file(\"assets/sample_en.pdf\")\n", + "print(f\"Original EN Text:\\n {text}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "id": "e00eb1a9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Résumé FR:\n", + " Governments, businesses, and individuals must take immediate and sustained action to reduce\n", + "greenhouse gas emissions. 
Investing in renewable energy, promoting sustainable transportation, and\n", + "encouraging conservation are essential steps.\n" + ] + } + ], + "source": [ + "\n", + "summary = generate_summary(text=text)\n", + "print(f\"Résumé EN:\\n {textwrap.fill(summary, width=100)}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "id": "851ce8a9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "File content:\n", + "Le changement climatique est une menace majeure pour la planète.\n", + "Les experts estiment que si les émissions de gaz à effet de serre ne sont pas réduites de manière significative,\n", + " les températures mondiales continueront d'augmenter, provoquant des phénomènes météorologiques extrêmes, des inondations,\n", + " des sécheresses prolongées et la montée du niveau des mers.\n", + " Pour y faire face, il est nécessaire de transformer nos modes de production et de consommation, de développer les énergies renouvelables,\n", + " et de mettre en œuvre des politiques publiques ambitieuses.\n", + "\n", + "\n" + ] + } + ], + "source": [ + "import os\n", + "\n", + "\n", + "def read_txt_file(filepath: str) -> str:\n", + " \"\"\"Read content from a .txt file.\"\"\"\n", + " try:\n", + " with open(filepath, \"r\", encoding=\"utf-8\") as f:\n", + " content = f.read()\n", + " return content\n", + " except Exception as e:\n", + " print(f\"❌ Error reading TXT file: {e}\")\n", + " return \"\"\n", + "\n", + "\n", + "def read_pdf_file(filepath: str) -> str:\n", + " \"\"\"Extract text from a PDF file using PyPDF2.\"\"\"\n", + " try:\n", + " with open(filepath, \"rb\") as file:\n", + " pdf_reader = PyPDF2.PdfReader(file)\n", + " text = \"\"\n", + " for page_num in range(len(pdf_reader.pages)):\n", + " page = pdf_reader.pages[page_num]\n", + " text += page.extract_text()\n", + " return text.strip()\n", + " except Exception as e:\n", + " print(f\"❌ Error reading PDF file: {e}\")\n", + " return \"\"\n", + "\n", + 
"\n", + "def read_file(filepath: str) -> str:\n", + " \"\"\"Read a file (txt or pdf) and return its content as text.\"\"\"\n", + " if os.path.splitext(filepath)[1].lower() == \".txt\":\n", + " return read_txt_file(filepath)\n", + " elif os.path.splitext(filepath)[1].lower() == \".pdf\":\n", + " return read_pdf_file(filepath)\n", + " else:\n", + " print(f\"❌ Unsupported file type: {filepath}\")\n", + " return \"\"\n", + "\n", + "\n", + "\n", + "content = read_file(filepath=\"assets/sample_fr.txt\")\n", + "print(f\"File content:\\n{content}\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "be450692", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Résumé FR: La pollution de l'air est un enjeu majeur. Il faut agir rapidement pour limiter les émissions. Le changement climatique est une menace majeure pour la planète.\n" + ] + } + ], + "source": [ + "from summarizer.summarize import generate_summary\n", + "\n", + "# Short test in French\n", + "text_fr = \"\"\"La pollution de l'air est un enjeu majeur. Il faut agir rapidement pour limiter les émissions. Le changement climatique est une menace majeure pour la planète.\n", + "Les experts estiment que si les émissions de gaz à effet de serre ne sont pas réduites de manière significative,\n", + " les températures mondiales continueront d'augmenter, provoquant des phénomènes météorologiques extrêmes, des inondations,\n", + " des sécheresses prolongées et la montée du niveau des mers.\"\"\"\n", + "\n", + "summary = generate_summary(text=text_fr)\n", + "print(\"Résumé FR:\", summary)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "8f748fea", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Your max_length is set to 100, but your input_length is only 18. 
Since this is a summarization task, where outputs shorter than the input are typically wanted, you might consider decreasing max_length manually, e.g. summarizer('...', max_length=9)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Résumé FR: L'air pollué est un enjeu majeur. La pollution de l'air est un facteur contributif majeur.\n" + ] + } + ], + "source": [ + "summary = generate_summary(text=\"La pollution de l'air est un enjeu majeur.\")\n", + "print(\"Résumé FR:\", summary)" + ] + }, + { + "cell_type": "markdown", + "id": "c56fb375", + "metadata": {}, + "source": [ + "### Web interface" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "id": "33c8f38c", + "metadata": {}, + "outputs": [], + "source": [ + "import gradio as gr\n", + "from summarizer.summarize import generate_summary\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "id": "8bcbe018", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Help on package gradio:\n", + "\n", + "NAME\n", + " gradio\n", + "\n", + "PACKAGE CONTENTS\n", + " _simple_templates (package)\n", + " analytics\n", + " blocks\n", + " blocks_events\n", + " chat_interface\n", + " cli (package)\n", + " component_meta\n", + " components (package)\n", + " context\n", + " data_classes\n", + " events\n", + " exceptions\n", + " external\n", + " external_utils\n", + " flagging\n", + " helpers\n", + " http_server\n", + " image_utils\n", + " interface\n", + " ipython_ext\n", + " layouts (package)\n", + " mcp\n", + " monitoring_dashboard\n", + " networking\n", + " node_server\n", + " oauth\n", + " pipelines\n", + " pipelines_utils\n", + " processing_utils\n", + " queueing\n", + " ranged_response\n", + " renderable\n", + " route_utils\n", + " routes\n", + " server_messages\n", + " sketch (package)\n", + " state_holder\n", + " templates\n", + " test_data (package)\n", + " themes (package)\n", + " tunneling\n", + " utils\n", + " 
wasm_utils\n", + "\n", + "CLASSES\n", + " abc.ABC(builtins.object)\n", + " gradio.flagging.FlaggingCallback\n", + " gradio.flagging.CSVLogger\n", + " gradio.flagging.SimpleCSVLogger\n", + " builtins.dict(builtins.object)\n", + " gradio.components.chatbot.MessageDict\n", + " gradio.oauth.OAuthProfile(builtins.dict, typing.Generic)\n", + " builtins.object\n", + " gradio.components.audio.WaveformOptions\n", + " gradio.components.chatbot.ChatMessage\n", + " gradio.components.image_editor.Eraser\n", + " gradio.components.image_editor.Brush\n", + " gradio.components.image_editor.LayerOptions\n", + " gradio.components.image_editor.WebcamOptions\n", + " gradio.events.EventData\n", + " gradio.events.CopyData\n", + " gradio.events.DeletedFileData\n", + " gradio.events.DownloadData\n", + " gradio.events.EditData\n", + " gradio.events.KeyUpData\n", + " gradio.events.LikeData\n", + " gradio.events.RetryData\n", + " gradio.events.SelectData\n", + " gradio.events.UndoData\n", + " gradio.oauth.OAuthToken\n", + " gradio.route_utils.Request\n", + " gradio.utils.FileSize\n", + " collections.abc.Iterable(builtins.object)\n", + " gradio.helpers.Progress\n", + " gradio.blocks.BlockContext(gradio.blocks.Block)\n", + " gradio.blocks.Blocks(gradio.blocks.BlockContext, gradio.blocks_events.BlocksEvents)\n", + " gradio.chat_interface.ChatInterface\n", + " gradio.interface.Interface\n", + " gradio.interface.TabbedInterface\n", + " gradio.layouts.accordion.Accordion\n", + " gradio.layouts.column.Column\n", + " gradio.layouts.group.Group\n", + " gradio.layouts.row.Row\n", + " gradio.layouts.sidebar.Sidebar\n", + " gradio.layouts.tabs.Tab\n", + " gradio.layouts.tabs.Tabs\n", + " gradio.blocks_events.BlocksEvents(builtins.object)\n", + " gradio.blocks.Blocks(gradio.blocks.BlockContext, gradio.blocks_events.BlocksEvents)\n", + " gradio.chat_interface.ChatInterface\n", + " gradio.interface.Interface\n", + " gradio.interface.TabbedInterface\n", + " 
gradio.components.base.Component(gradio.components.base.ComponentBase, gradio.blocks.Block)\n", + " gradio.components.annotated_image.AnnotatedImage\n", + " gradio.components.audio.Audio(gradio.components.base.StreamingInput, gradio.components.base.StreamingOutput, gradio.components.base.Component)\n", + " gradio.templates.Microphone\n", + " gradio.components.browser_state.BrowserState\n", + " gradio.components.button.Button\n", + " gradio.components.clear_button.ClearButton\n", + " gradio.components.deep_link_button.DeepLinkButton\n", + " gradio.components.duplicate_button.DuplicateButton\n", + " gradio.components.login_button.LoginButton\n", + " gradio.components.chatbot.Chatbot\n", + " gradio.components.code.Code\n", + " gradio.components.color_picker.ColorPicker\n", + " gradio.components.dataframe.Dataframe\n", + " gradio.templates.List\n", + " gradio.templates.Matrix\n", + " gradio.templates.Numpy\n", + " gradio.components.dataset.Dataset\n", + " gradio.components.download_button.DownloadButton\n", + " gradio.components.file.File\n", + " gradio.templates.Files\n", + " gradio.components.file_explorer.FileExplorer\n", + " gradio.components.gallery.Gallery\n", + " gradio.components.highlighted_text.HighlightedText\n", + " gradio.components.html.HTML\n", + " gradio.components.image.Image(gradio.components.base.StreamingInput, gradio.components.base.Component)\n", + " gradio.components.image_editor.ImageEditor\n", + " gradio.templates.ImageMask\n", + " gradio.templates.Paint\n", + " gradio.templates.Sketchpad\n", + " gradio.components.imageslider.ImageSlider\n", + " gradio.components.json_component.JSON\n", + " gradio.components.label.Label\n", + " gradio.components.markdown.Markdown\n", + " gradio.components.model3d.Model3D\n", + " gradio.components.paramviewer.ParamViewer\n", + " gradio.components.plot.Plot\n", + " gradio.components.state.State\n", + " gradio.components.timer.Timer\n", + " gradio.components.upload_button.UploadButton\n", + " 
gradio.components.video.Video(gradio.components.base.StreamingOutput, gradio.components.base.Component)\n", + " gradio.templates.PlayableVideo\n", + " gradio.components.base.FormComponent(gradio.components.base.Component)\n", + " gradio.components.checkbox.Checkbox\n", + " gradio.components.checkboxgroup.CheckboxGroup\n", + " gradio.components.datetime.DateTime\n", + " gradio.components.dropdown.Dropdown\n", + " gradio.components.multimodal_textbox.MultimodalTextbox\n", + " gradio.components.number.Number\n", + " gradio.components.radio.Radio\n", + " gradio.components.slider.Slider\n", + " gradio.components.textbox.Textbox\n", + " gradio.templates.TextArea\n", + " gradio.components.base.StreamingInput(builtins.object)\n", + " gradio.components.audio.Audio(gradio.components.base.StreamingInput, gradio.components.base.StreamingOutput, gradio.components.base.Component)\n", + " gradio.templates.Microphone\n", + " gradio.components.image.Image(gradio.components.base.StreamingInput, gradio.components.base.Component)\n", + " gradio.components.base.StreamingOutput(builtins.object)\n", + " gradio.components.audio.Audio(gradio.components.base.StreamingInput, gradio.components.base.StreamingOutput, gradio.components.base.Component)\n", + " gradio.templates.Microphone\n", + " gradio.components.video.Video(gradio.components.base.StreamingOutput, gradio.components.base.Component)\n", + " gradio.templates.PlayableVideo\n", + " gradio.components.native_plot.NativePlot(gradio.components.base.Component)\n", + " gradio.components.native_plot.BarPlot\n", + " gradio.components.native_plot.LinePlot\n", + " gradio.components.native_plot.ScatterPlot\n", + " gradio.data_classes.GradioModel(gradio.data_classes.GradioBaseModel, pydantic.main.BaseModel)\n", + " gradio.data_classes.FileData\n", + " gradio.themes.base.ThemeClass(builtins.object)\n", + " gradio.themes.base.Base\n", + " gradio_client.exceptions.AppError(builtins.ValueError)\n", + " gradio.exceptions.Error\n", + " 
typing.Generic(builtins.object)\n", + " gradio.oauth.OAuthProfile(builtins.dict, typing.Generic)\n", + "\n", + " class Accordion(gradio.blocks.BlockContext)\n", + " | Accordion(label: 'str | None' = None, *, open: 'bool' = True, visible: 'bool' = True, elem_id: 'str | None' = None, elem_classes: 'list[str] | str | None' = None, render: 'bool' = True)\n", + " |\n", + " | Accordion is a layout element which can be toggled to show/hide the contained content.\n", + " | Example:\n", + " | with gr.Accordion(\"See Details\"):\n", + " | gr.Markdown(\"lorem ipsum\")\n", + " |\n", + " | Method resolution order:\n", + " | Accordion\n", + " | gradio.blocks.BlockContext\n", + " | gradio.blocks.Block\n", + " | builtins.object\n", + " |\n", + " | Methods defined here:\n", + " |\n", + " | __init__(self, label: 'str | None' = None, *, open: 'bool' = True, visible: 'bool' = True, elem_id: 'str | None' = None, elem_classes: 'list[str] | str | None' = None, render: 'bool' = True)\n", + " | Parameters:\n", + " | label: name of accordion section.\n", + " | open: if True, accordion is open by default.\n", + " | elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.\n", + " | elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles.\n", + " | render: If False, this layout will not be rendered in the Blocks context. 
Should be used if the intention is to assign event listeners now but render the component later.\n", + " |\n", + " | collapse = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the Accordion is collapsed.\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. 
If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | expand = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the Accordion is expanded.\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. 
Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. 
If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes defined here:\n", + " |\n", + " | EVENTS = ['expand', 'collapse']\n", + " |\n", + " | __abstractmethods__ = frozenset()\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Methods inherited from gradio.blocks.BlockContext:\n", + " |\n", + " | __enter__(self)\n", + " |\n", + " | __exit__(self, exc_type: 'type[BaseException] | None' = None, *args)\n", + " |\n", + " | add(self, child: 'Block')\n", + " |\n", + " | add_child(self, child: 'Block')\n", + " |\n", + " | fill_expected_parents(self)\n", + " |\n", + " | postprocess(self, y)\n", + " | Any postprocessing needed to be performed on a block context.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Class methods inherited from gradio.blocks.BlockContext:\n", + " |\n", + " | get_component_class_id() -> 'str'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Readonly properties inherited from gradio.blocks.BlockContext:\n", + " |\n", + " | component_class_id\n", + " |\n", + " | skip_api\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes inherited from gradio.blocks.BlockContext:\n", + " |\n", + " | FRONTEND_DIR = '../../frontend/'\n", + " |\n", + " | TEMPLATE_DIR = './templates/'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Methods inherited from gradio.blocks.Block:\n", + " |\n", + " | async async_move_resource_to_block_cache(self, url_or_file_path: 'str | Path | None') -> 'str | None'\n", + " | Moves a file or downloads a file from a url to a block's cache directory, adds\n", + " | to to the block's temp_files, and returns the path to the file in cache. 
This\n", + " | ensures that the file is accessible to the Block and can be served to users.\n", + " |\n", + " | This async version of the function is used when this is being called within\n", + " | a FastAPI route, as this is not blocking.\n", + " |\n", + " | get_block_class(self) -> 'str'\n", + " | Gets block's class name. If it is template component it gets the parent's class name.\n", + " | Very similar to the get_block_name method, but this method is used to reconstruct a\n", + " | Gradio app that is loaded from a Space using gr.load(). This should generally\n", + " | NOT be overridden.\n", + " |\n", + " | get_block_name(self) -> 'str'\n", + " | Gets block's class name. If it is template component it gets the parent's class name.\n", + " | This is used to identify the Svelte file to use in the frontend. Override this method\n", + " | if a component should use a different Svelte file than the default naming convention.\n", + " |\n", + " | get_config(self)\n", + " |\n", + " | get_expected_parent(self) -> 'type[BlockContext] | None'\n", + " |\n", + " | move_resource_to_block_cache(self, url_or_file_path: 'str | Path | None') -> 'str | None'\n", + " | Moves a file or downloads a file from a url to a block's cache directory, adds\n", + " | to to the block's temp_files, and returns the path to the file in cache. This\n", + " | ensures that the file is accessible to the Block and can be served to users.\n", + " |\n", + " | This sync version of the function is used when this is being called outside of\n", + " | a FastAPI route, e.g. when examples are being cached.\n", + " |\n", + " | render(self)\n", + " | Adds self into appropriate BlockContext\n", + " |\n", + " | serve_static_file(self, url_or_file_path: 'str | Path | dict | None') -> 'dict | None'\n", + " | If a file is a local file, moves it to the block's cache directory and returns\n", + " | a FileData-type dictionary corresponding to the file. 
If the file is a URL, returns a\n", + " | FileData-type dictionary corresponding to the URL. This ensures that the file is\n", + " | accessible in the frontend and can be served to users.\n", + " |\n", + " | Examples:\n", + " | >>> block.serve_static_file(\"https://gradio.app/logo.png\") -> {\"path\": \"https://gradio.app/logo.png\", \"url\": \"https://gradio.app/logo.png\"}\n", + " | >>> block.serve_static_file(\"logo.png\") -> {\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}\n", + " | >>> block.serve_static_file({\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}) -> {\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}\n", + " |\n", + " | unrender(self)\n", + " | Removes self from BlockContext if it has been rendered (otherwise does nothing).\n", + " | Removes self from the layout and collection of blocks, but does not delete any event triggers.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Class methods inherited from gradio.blocks.Block:\n", + " |\n", + " | recover_kwargs(props: 'dict[str, Any]', additional_keys: 'list[str] | None' = None)\n", + " | Recovers kwargs from a dict of props.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Readonly properties inherited from gradio.blocks.Block:\n", + " |\n", + " | constructor_args\n", + " | Get the arguments passed to the component's initializer.\n", + " |\n", + " | Only set classes whose metaclass is ComponentMeta\n", + " |\n", + " | events\n", + " |\n", + " | stateful\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data descriptors inherited from gradio.blocks.Block:\n", + " |\n", + " | __dict__\n", + " | dictionary for instance variables\n", + " |\n", + " | __weakref__\n", + " | list of weak references to the object\n", + "\n", + " class AnnotatedImage(gradio.components.base.Component)\n", + " | AnnotatedImage(value: 'tuple[np.ndarray | 
PIL.Image.Image | str, list[tuple[np.ndarray | tuple[int, int, int, int], str]]] | None' = None, *, format: 'str' = 'webp', show_legend: 'bool' = True, height: 'int | str | None' = None, width: 'int | str | None' = None, color_map: 'dict[str, str] | None' = None, label: 'str | None' = None, every: 'Timer | float | None' = None, inputs: 'Component | Sequence[Component] | set[Component] | None' = None, show_label: 'bool | None' = None, container: 'bool' = True, scale: 'int | None' = None, min_width: 'int' = 160, visible: 'bool' = True, elem_id: 'str | None' = None, elem_classes: 'list[str] | str | None' = None, render: 'bool' = True, key: 'int | str | None' = None, show_fullscreen_button: 'bool' = True)\n", + " |\n", + " | Creates a component to displays a base image and colored annotations on top of that image. Annotations can take the from of rectangles (e.g. object detection) or masks (e.g. image segmentation).\n", + " | As this component does not accept user input, it is rarely used as an input component.\n", + " |\n", + " | Demos: image_segmentation\n", + " |\n", + " | Method resolution order:\n", + " | AnnotatedImage\n", + " | gradio.components.base.Component\n", + " | gradio.components.base.ComponentBase\n", + " | abc.ABC\n", + " | gradio.blocks.Block\n", + " | builtins.object\n", + " |\n", + " | Methods defined here:\n", + " |\n", + " | __init__(self, value: 'tuple[np.ndarray | PIL.Image.Image | str, list[tuple[np.ndarray | tuple[int, int, int, int], str]]] | None' = None, *, format: 'str' = 'webp', show_legend: 'bool' = True, height: 'int | str | None' = None, width: 'int | str | None' = None, color_map: 'dict[str, str] | None' = None, label: 'str | None' = None, every: 'Timer | float | None' = None, inputs: 'Component | Sequence[Component] | set[Component] | None' = None, show_label: 'bool | None' = None, container: 'bool' = True, scale: 'int | None' = None, min_width: 'int' = 160, visible: 'bool' = True, elem_id: 'str | None' = None, elem_classes: 
'list[str] | str | None' = None, render: 'bool' = True, key: 'int | str | None' = None, show_fullscreen_button: 'bool' = True)\n", + " | Parameters:\n", + " | value: Tuple of base image and list of (annotation, label) pairs.\n", + " | format: Format used to save images before it is returned to the front end, such as 'jpeg' or 'png'. This parameter only takes effect when the base image is returned from the prediction function as a numpy array or a PIL Image. The format should be supported by the PIL library.\n", + " | show_legend: If True, will show a legend of the annotations.\n", + " | height: The height of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed image file or numpy array, but will affect the displayed image.\n", + " | width: The width of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed image file or numpy array, but will affect the displayed image.\n", + " | color_map: A dictionary mapping labels to colors. The colors must be specified as hex codes.\n", + " | label: the label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.\n", + " | every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.\n", + " | inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). 
`value` is recalculated any time the inputs change.\n", + " | show_label: if True, will display label.\n", + " | container: If True, will place the component in a container - providing some extra padding around the border.\n", + " | scale: Relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.\n", + " | min_width: Minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.\n", + " | visible: If False, component will be hidden.\n", + " | elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.\n", + " | elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.\n", + " | render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.\n", + " | key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.\n", + " | show_fullscreen_button: If True, will show a button to allow the image to be viewed in fullscreen mode.\n", + " |\n", + " | example_payload(self) -> 'Any'\n", + " | An example input data for this component, e.g. what is passed to this component's preprocess() method.\n", + " | This is used to generate the docs for the View API page for Gradio apps using this component.\n", + " |\n", + " | example_value(self) -> 'Any'\n", + " | An example output data for this component, e.g. 
what is passed to this component's postprocess() method.\n", + " | This is used to generate an example value if this component is used as a template for a custom component.\n", + " |\n", + " | postprocess(self, value: 'tuple[np.ndarray | PIL.Image.Image | str, Sequence[tuple[np.ndarray | tuple[int, int, int, int], str]]] | None') -> 'AnnotatedImageData | None'\n", + " | Parameters:\n", + " | value: Expects a a tuple of a base image and list of annotations: a `tuple[Image, list[Annotation]]`. The `Image` itself can be `str` filepath, `numpy.ndarray`, or `PIL.Image`. Each `Annotation` is a `tuple[Mask, str]`. The `Mask` can be either a `tuple` of 4 `int`'s representing the bounding box coordinates (x1, y1, x2, y2), or 0-1 confidence mask in the form of a `numpy.ndarray` of the same shape as the image, while the second element of the `Annotation` tuple is a `str` label.\n", + " | Returns:\n", + " | Tuple of base image file and list of annotations, with each annotation a two-part tuple where the first element image path of the mask, and the second element is the label.\n", + " |\n", + " | preprocess(self, payload: 'AnnotatedImageData | None') -> 'tuple[str, list[tuple[str, str]]] | None'\n", + " | Parameters:\n", + " | payload: Dict of base image and list of annotations.\n", + " | Returns:\n", + " | Passes its value as a `tuple` consisting of a `str` filepath to a base image and `list` of annotations. 
Each annotation itself is `tuple` of a mask (as a `str` filepath to image) and a `str` label.\n", + " |\n", + " | select = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | Event listener for when the user selects or deselects the AnnotatedImage. Uses event data gradio.SelectData to carry `value` referring to the label of the AnnotatedImage, and `selected` to refer to state of the AnnotatedImage. See EventData documentation on how to use this event data\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). 
Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes defined here:\n", + " |\n", + " | EVENTS = ['select']\n", + " |\n", + " | __abstractmethods__ = frozenset()\n", + " |\n", + " | __annotations__ = {}\n", + " |\n", + " | data_model = 'dict[str, Any]'\n", + " | The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description].\n", + " | Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output\n", + " |\n", + " | api_info_as_input(self) -> 'dict[str, Any]'\n", + " |\n", + " | api_info_as_output(self) -> 'dict[str, Any]'\n", + " |\n", + " | as_example(self, value)\n", + " | Deprecated and replaced by `process_example()`.\n", + " |\n", + " | attach_load_event(self, callable: 'Callable', every: 'Timer | float | None', inputs: 'Component | Sequence[Component] | set[Component] | None' = None)\n", + " | Add an event that runs `callable`, optionally at interval specified by `every`.\n", + " |\n", + " | example_inputs(self) -> 'Any'\n", + " | Deprecated and replaced by `example_payload()` and `example_value()`.\n", + " |\n", + " | flag(self, payload: 'Any', flag_dir: 'str | Path' = '') -> 'str'\n", + " | Write the component's value to a format 
that can be stored in a csv or jsonl format for flagging.\n", + " |\n", + " | get_config(self)\n", + " |\n", + " | process_example(self, value)\n", + " | Process the input data in a way that can be displayed by the examples dataset component in the front-end.\n", + " | By default, this calls the `.postprocess()` method of the component. However, if the `.postprocess()` method is\n", + " | computationally intensive, or returns a large payload, a custom implementation may be appropriate.\n", + " |\n", + " | For example, the `process_example()` method of the `gr.Audio()` component only returns the name of the file, not\n", + " | the processed audio file. The `.process_example()` method of the `gr.Dataframe()` returns the head of a dataframe\n", + " | instead of the full dataframe.\n", + " |\n", + " | The return value of this method must be json-serializable to put in the config.\n", + " |\n", + " | read_from_flag(self, payload: 'Any')\n", + " | Convert the data from the csv or jsonl file into the component state.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Static methods inherited from gradio.components.base.Component:\n", + " |\n", + " | get_load_fn_and_initial_value(value, inputs=None)\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Readonly properties inherited from gradio.components.base.Component:\n", + " |\n", + " | skip_api\n", + " | Whether this component should be skipped from the api return value\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes inherited from gradio.components.base.Component:\n", + " |\n", + " | FRONTEND_DIR = '../../frontend/'\n", + " |\n", + " | TEMPLATE_DIR = './templates/'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Class methods inherited from gradio.components.base.ComponentBase:\n", + " |\n", + 
" | get_component_class_id() -> 'str'\n", + " |\n", + " | has_event(event: 'str | EventListener') -> 'bool'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data descriptors inherited from gradio.components.base.ComponentBase:\n", + " |\n", + " | __dict__\n", + " | dictionary for instance variables\n", + " |\n", + " | __weakref__\n", + " | list of weak references to the object\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Methods inherited from gradio.blocks.Block:\n", + " |\n", + " | async async_move_resource_to_block_cache(self, url_or_file_path: 'str | Path | None') -> 'str | None'\n", + " | Moves a file or downloads a file from a url to a block's cache directory, adds\n", + " | to to the block's temp_files, and returns the path to the file in cache. This\n", + " | ensures that the file is accessible to the Block and can be served to users.\n", + " |\n", + " | This async version of the function is used when this is being called within\n", + " | a FastAPI route, as this is not blocking.\n", + " |\n", + " | get_block_class(self) -> 'str'\n", + " | Gets block's class name. If it is template component it gets the parent's class name.\n", + " | Very similar to the get_block_name method, but this method is used to reconstruct a\n", + " | Gradio app that is loaded from a Space using gr.load(). This should generally\n", + " | NOT be overridden.\n", + " |\n", + " | get_block_name(self) -> 'str'\n", + " | Gets block's class name. If it is template component it gets the parent's class name.\n", + " | This is used to identify the Svelte file to use in the frontend. 
Override this method\n", + " | if a component should use a different Svelte file than the default naming convention.\n", + " |\n", + " | get_expected_parent(self) -> 'type[BlockContext] | None'\n", + " |\n", + " | move_resource_to_block_cache(self, url_or_file_path: 'str | Path | None') -> 'str | None'\n", + " | Moves a file or downloads a file from a url to a block's cache directory, adds\n", + " | to to the block's temp_files, and returns the path to the file in cache. This\n", + " | ensures that the file is accessible to the Block and can be served to users.\n", + " |\n", + " | This sync version of the function is used when this is being called outside of\n", + " | a FastAPI route, e.g. when examples are being cached.\n", + " |\n", + " | render(self)\n", + " | Adds self into appropriate BlockContext\n", + " |\n", + " | serve_static_file(self, url_or_file_path: 'str | Path | dict | None') -> 'dict | None'\n", + " | If a file is a local file, moves it to the block's cache directory and returns\n", + " | a FileData-type dictionary corresponding to the file. If the file is a URL, returns a\n", + " | FileData-type dictionary corresponding to the URL. 
This ensures that the file is\n", + " | accessible in the frontend and can be served to users.\n", + " |\n", + " | Examples:\n", + " | >>> block.serve_static_file(\"https://gradio.app/logo.png\") -> {\"path\": \"https://gradio.app/logo.png\", \"url\": \"https://gradio.app/logo.png\"}\n", + " | >>> block.serve_static_file(\"logo.png\") -> {\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}\n", + " | >>> block.serve_static_file({\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}) -> {\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}\n", + " |\n", + " | unrender(self)\n", + " | Removes self from BlockContext if it has been rendered (otherwise does nothing).\n", + " | Removes self from the layout and collection of blocks, but does not delete any event triggers.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Class methods inherited from gradio.blocks.Block:\n", + " |\n", + " | recover_kwargs(props: 'dict[str, Any]', additional_keys: 'list[str] | None' = None)\n", + " | Recovers kwargs from a dict of props.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Readonly properties inherited from gradio.blocks.Block:\n", + " |\n", + " | constructor_args\n", + " | Get the arguments passed to the component's initializer.\n", + " |\n", + " | Only set classes whose metaclass is ComponentMeta\n", + " |\n", + " | events\n", + " |\n", + " | stateful\n", + "\n", + " Annotatedimage = class AnnotatedImage(gradio.components.base.Component)\n", + " | Annotatedimage(value: 'tuple[np.ndarray | PIL.Image.Image | str, list[tuple[np.ndarray | tuple[int, int, int, int], str]]] | None' = None, *, format: 'str' = 'webp', show_legend: 'bool' = True, height: 'int | str | None' = None, width: 'int | str | None' = None, color_map: 'dict[str, str] | None' = None, label: 'str | None' = None, every: 'Timer | float | None' = None, inputs: 'Component | Sequence[Component] | set[Component] | 
None' = None, show_label: 'bool | None' = None, container: 'bool' = True, scale: 'int | None' = None, min_width: 'int' = 160, visible: 'bool' = True, elem_id: 'str | None' = None, elem_classes: 'list[str] | str | None' = None, render: 'bool' = True, key: 'int | str | None' = None, show_fullscreen_button: 'bool' = True)\n", + " |\n", + " | Creates a component to displays a base image and colored annotations on top of that image. Annotations can take the from of rectangles (e.g. object detection) or masks (e.g. image segmentation).\n", + " | As this component does not accept user input, it is rarely used as an input component.\n", + " |\n", + " | Demos: image_segmentation\n", + " |\n", + " | Method resolution order:\n", + " | AnnotatedImage\n", + " | gradio.components.base.Component\n", + " | gradio.components.base.ComponentBase\n", + " | abc.ABC\n", + " | gradio.blocks.Block\n", + " | builtins.object\n", + " |\n", + " | Methods defined here:\n", + " |\n", + " | __init__(self, value: 'tuple[np.ndarray | PIL.Image.Image | str, list[tuple[np.ndarray | tuple[int, int, int, int], str]]] | None' = None, *, format: 'str' = 'webp', show_legend: 'bool' = True, height: 'int | str | None' = None, width: 'int | str | None' = None, color_map: 'dict[str, str] | None' = None, label: 'str | None' = None, every: 'Timer | float | None' = None, inputs: 'Component | Sequence[Component] | set[Component] | None' = None, show_label: 'bool | None' = None, container: 'bool' = True, scale: 'int | None' = None, min_width: 'int' = 160, visible: 'bool' = True, elem_id: 'str | None' = None, elem_classes: 'list[str] | str | None' = None, render: 'bool' = True, key: 'int | str | None' = None, show_fullscreen_button: 'bool' = True)\n", + " | Parameters:\n", + " | value: Tuple of base image and list of (annotation, label) pairs.\n", + " | format: Format used to save images before it is returned to the front end, such as 'jpeg' or 'png'. 
This parameter only takes effect when the base image is returned from the prediction function as a numpy array or a PIL Image. The format should be supported by the PIL library.\n", + " | show_legend: If True, will show a legend of the annotations.\n", + " | height: The height of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed image file or numpy array, but will affect the displayed image.\n", + " | width: The width of the component, specified in pixels if a number is passed, or in CSS units if a string is passed. This has no effect on the preprocessed image file or numpy array, but will affect the displayed image.\n", + " | color_map: A dictionary mapping labels to colors. The colors must be specified as hex codes.\n", + " | label: the label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.\n", + " | every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.\n", + " | inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.\n", + " | show_label: if True, will display label.\n", + " | container: If True, will place the component in a container - providing some extra padding around the border.\n", + " | scale: Relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.\n", + " | min_width: Minimum pixel width, will wrap if not sufficient screen space to satisfy this value. 
If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.\n", + " | visible: If False, component will be hidden.\n", + " | elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.\n", + " | elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.\n", + " | render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.\n", + " | key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.\n", + " | show_fullscreen_button: If True, will show a button to allow the image to be viewed in fullscreen mode.\n", + " |\n", + " | example_payload(self) -> 'Any'\n", + " | An example input data for this component, e.g. what is passed to this component's preprocess() method.\n", + " | This is used to generate the docs for the View API page for Gradio apps using this component.\n", + " |\n", + " | example_value(self) -> 'Any'\n", + " | An example output data for this component, e.g. what is passed to this component's postprocess() method.\n", + " | This is used to generate an example value if this component is used as a template for a custom component.\n", + " |\n", + " | postprocess(self, value: 'tuple[np.ndarray | PIL.Image.Image | str, Sequence[tuple[np.ndarray | tuple[int, int, int, int], str]]] | None') -> 'AnnotatedImageData | None'\n", + " | Parameters:\n", + " | value: Expects a a tuple of a base image and list of annotations: a `tuple[Image, list[Annotation]]`. The `Image` itself can be `str` filepath, `numpy.ndarray`, or `PIL.Image`. Each `Annotation` is a `tuple[Mask, str]`. 
The `Mask` can be either a `tuple` of 4 `int`'s representing the bounding box coordinates (x1, y1, x2, y2), or 0-1 confidence mask in the form of a `numpy.ndarray` of the same shape as the image, while the second element of the `Annotation` tuple is a `str` label.\n", + " | Returns:\n", + " | Tuple of base image file and list of annotations, with each annotation a two-part tuple where the first element image path of the mask, and the second element is the label.\n", + " |\n", + " | preprocess(self, payload: 'AnnotatedImageData | None') -> 'tuple[str, list[tuple[str, str]]] | None'\n", + " | Parameters:\n", + " | payload: Dict of base image and list of annotations.\n", + " | Returns:\n", + " | Passes its value as a `tuple` consisting of a `str` filepath to a base image and `list` of annotations. Each annotation itself is `tuple` of a mask (as a `str` filepath to image) and a `str` label.\n", + " |\n", + " | select = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, 
like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | Event listener for when the user selects or deselects the AnnotatedImage. Uses event data gradio.SelectData to carry `value` referring to the label of the AnnotatedImage, and `selected` to refer to state of the AnnotatedImage. See EventData documentation on how to use this event data\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. 
If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. 
Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes defined here:\n", + " |\n", + " | EVENTS = ['select']\n", + " |\n", + " | __abstractmethods__ = frozenset()\n", + " |\n", + " | __annotations__ = {}\n", + " |\n", + " | data_model = 'dict[str, Any]'\n", + " | The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description].\n", + " | Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output\n", + " |\n", + " | api_info_as_input(self) -> 'dict[str, Any]'\n", + " |\n", + " | api_info_as_output(self) -> 'dict[str, Any]'\n", + " |\n", + " | as_example(self, value)\n", + " | Deprecated and replaced by `process_example()`.\n", + " |\n", + " | attach_load_event(self, callable: 'Callable', every: 'Timer | float | None', inputs: 'Component | Sequence[Component] | set[Component] | None' = None)\n", + " | 
Add an event that runs `callable`, optionally at interval specified by `every`.\n", + " |\n", + " | example_inputs(self) -> 'Any'\n", + " | Deprecated and replaced by `example_payload()` and `example_value()`.\n", + " |\n", + " | flag(self, payload: 'Any', flag_dir: 'str | Path' = '') -> 'str'\n", + " | Write the component's value to a format that can be stored in a csv or jsonl format for flagging.\n", + " |\n", + " | get_config(self)\n", + " |\n", + " | process_example(self, value)\n", + " | Process the input data in a way that can be displayed by the examples dataset component in the front-end.\n", + " | By default, this calls the `.postprocess()` method of the component. However, if the `.postprocess()` method is\n", + " | computationally intensive, or returns a large payload, a custom implementation may be appropriate.\n", + " |\n", + " | For example, the `process_example()` method of the `gr.Audio()` component only returns the name of the file, not\n", + " | the processed audio file. 
The `.process_example()` method of the `gr.Dataframe()` returns the head of a dataframe\n", + " | instead of the full dataframe.\n", + " |\n", + " | The return value of this method must be json-serializable to put in the config.\n", + " |\n", + " | read_from_flag(self, payload: 'Any')\n", + " | Convert the data from the csv or jsonl file into the component state.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Static methods inherited from gradio.components.base.Component:\n", + " |\n", + " | get_load_fn_and_initial_value(value, inputs=None)\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Readonly properties inherited from gradio.components.base.Component:\n", + " |\n", + " | skip_api\n", + " | Whether this component should be skipped from the api return value\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes inherited from gradio.components.base.Component:\n", + " |\n", + " | FRONTEND_DIR = '../../frontend/'\n", + " |\n", + " | TEMPLATE_DIR = './templates/'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Class methods inherited from gradio.components.base.ComponentBase:\n", + " |\n", + " | get_component_class_id() -> 'str'\n", + " |\n", + " | has_event(event: 'str | EventListener') -> 'bool'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data descriptors inherited from gradio.components.base.ComponentBase:\n", + " |\n", + " | __dict__\n", + " | dictionary for instance variables\n", + " |\n", + " | __weakref__\n", + " | list of weak references to the object\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Methods inherited from gradio.blocks.Block:\n", + " |\n", + " | async async_move_resource_to_block_cache(self, 
url_or_file_path: 'str | Path | None') -> 'str | None'\n", + " | Moves a file or downloads a file from a url to a block's cache directory, adds\n", + " | to to the block's temp_files, and returns the path to the file in cache. This\n", + " | ensures that the file is accessible to the Block and can be served to users.\n", + " |\n", + " | This async version of the function is used when this is being called within\n", + " | a FastAPI route, as this is not blocking.\n", + " |\n", + " | get_block_class(self) -> 'str'\n", + " | Gets block's class name. If it is template component it gets the parent's class name.\n", + " | Very similar to the get_block_name method, but this method is used to reconstruct a\n", + " | Gradio app that is loaded from a Space using gr.load(). This should generally\n", + " | NOT be overridden.\n", + " |\n", + " | get_block_name(self) -> 'str'\n", + " | Gets block's class name. If it is template component it gets the parent's class name.\n", + " | This is used to identify the Svelte file to use in the frontend. Override this method\n", + " | if a component should use a different Svelte file than the default naming convention.\n", + " |\n", + " | get_expected_parent(self) -> 'type[BlockContext] | None'\n", + " |\n", + " | move_resource_to_block_cache(self, url_or_file_path: 'str | Path | None') -> 'str | None'\n", + " | Moves a file or downloads a file from a url to a block's cache directory, adds\n", + " | to to the block's temp_files, and returns the path to the file in cache. This\n", + " | ensures that the file is accessible to the Block and can be served to users.\n", + " |\n", + " | This sync version of the function is used when this is being called outside of\n", + " | a FastAPI route, e.g. 
when examples are being cached.\n", + " |\n", + " | render(self)\n", + " | Adds self into appropriate BlockContext\n", + " |\n", + " | serve_static_file(self, url_or_file_path: 'str | Path | dict | None') -> 'dict | None'\n", + " | If a file is a local file, moves it to the block's cache directory and returns\n", + " | a FileData-type dictionary corresponding to the file. If the file is a URL, returns a\n", + " | FileData-type dictionary corresponding to the URL. This ensures that the file is\n", + " | accessible in the frontend and can be served to users.\n", + " |\n", + " | Examples:\n", + " | >>> block.serve_static_file(\"https://gradio.app/logo.png\") -> {\"path\": \"https://gradio.app/logo.png\", \"url\": \"https://gradio.app/logo.png\"}\n", + " | >>> block.serve_static_file(\"logo.png\") -> {\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}\n", + " | >>> block.serve_static_file({\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}) -> {\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}\n", + " |\n", + " | unrender(self)\n", + " | Removes self from BlockContext if it has been rendered (otherwise does nothing).\n", + " | Removes self from the layout and collection of blocks, but does not delete any event triggers.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Class methods inherited from gradio.blocks.Block:\n", + " |\n", + " | recover_kwargs(props: 'dict[str, Any]', additional_keys: 'list[str] | None' = None)\n", + " | Recovers kwargs from a dict of props.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Readonly properties inherited from gradio.blocks.Block:\n", + " |\n", + " | constructor_args\n", + " | Get the arguments passed to the component's initializer.\n", + " |\n", + " | Only set classes whose metaclass is ComponentMeta\n", + " |\n", + " | events\n", + " |\n", + " | stateful\n", + "\n", + " class Audio(gradio.components.base.StreamingInput, 
gradio.components.base.StreamingOutput, gradio.components.base.Component)\n", + " | Audio(value: 'str | Path | tuple[int, np.ndarray] | Callable | None' = None, *, sources: \"list[Literal['upload', 'microphone']] | Literal['upload', 'microphone'] | None\" = None, type: \"Literal['numpy', 'filepath']\" = 'numpy', label: 'str | None' = None, every: 'Timer | float | None' = None, inputs: 'Component | Sequence[Component] | set[Component] | None' = None, show_label: 'bool | None' = None, container: 'bool' = True, scale: 'int | None' = None, min_width: 'int' = 160, interactive: 'bool | None' = None, visible: 'bool' = True, streaming: 'bool' = False, elem_id: 'str | None' = None, elem_classes: 'list[str] | str | None' = None, render: 'bool' = True, key: 'int | str | None' = None, format: \"Literal['wav', 'mp3'] | None\" = None, autoplay: 'bool' = False, show_download_button: 'bool | None' = None, show_share_button: 'bool | None' = None, editable: 'bool' = True, min_length: 'int | None' = None, max_length: 'int | None' = None, waveform_options: 'WaveformOptions | dict | None' = None, loop: 'bool' = False, recording: 'bool' = False)\n", + " |\n", + " | Creates an audio component that can be used to upload/record audio (as an input) or display audio (as an output).\n", + " | Demos: generate_tone, reverse_audio\n", + " | Guides: real-time-speech-recognition\n", + " |\n", + " | Method resolution order:\n", + " | Audio\n", + " | gradio.components.base.StreamingInput\n", + " | gradio.components.base.StreamingOutput\n", + " | gradio.components.base.Component\n", + " | gradio.components.base.ComponentBase\n", + " | abc.ABC\n", + " | gradio.blocks.Block\n", + " | builtins.object\n", + " |\n", + " | Methods defined here:\n", + " |\n", + " | __init__(self, value: 'str | Path | tuple[int, np.ndarray] | Callable | None' = None, *, sources: \"list[Literal['upload', 'microphone']] | Literal['upload', 'microphone'] | None\" = None, type: \"Literal['numpy', 'filepath']\" = 'numpy', label: 
'str | None' = None, every: 'Timer | float | None' = None, inputs: 'Component | Sequence[Component] | set[Component] | None' = None, show_label: 'bool | None' = None, container: 'bool' = True, scale: 'int | None' = None, min_width: 'int' = 160, interactive: 'bool | None' = None, visible: 'bool' = True, streaming: 'bool' = False, elem_id: 'str | None' = None, elem_classes: 'list[str] | str | None' = None, render: 'bool' = True, key: 'int | str | None' = None, format: \"Literal['wav', 'mp3'] | None\" = None, autoplay: 'bool' = False, show_download_button: 'bool | None' = None, show_share_button: 'bool | None' = None, editable: 'bool' = True, min_length: 'int | None' = None, max_length: 'int | None' = None, waveform_options: 'WaveformOptions | dict | None' = None, loop: 'bool' = False, recording: 'bool' = False)\n", + " | Parameters:\n", + " | value: A path, URL, or [sample_rate, numpy array] tuple (sample rate in Hz, audio data as a float or int numpy array) for the default value that Audio component is going to take. If a function is provided, the function will be called each time the app loads to set the initial value of this component.\n", + " | sources: A list of sources permitted for audio. \"upload\" creates a box where user can drop an audio file, \"microphone\" creates a microphone input. The first element in the list will be used as the default source. If None, defaults to [\"upload\", \"microphone\"], or [\"microphone\"] if `streaming` is True.\n", + " | type: The format the audio file is converted to before being passed into the prediction function. \"numpy\" converts the audio to a tuple consisting of: (int sample rate, numpy.array for the data), \"filepath\" passes a str path to a temporary file containing the audio.\n", + " | label: the label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. 
If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to.\n", + " | every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.\n", + " | inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.\n", + " | show_label: if True, will display label.\n", + " | container: If True, will place the component in a container - providing some extra padding around the border.\n", + " | scale: Relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.\n", + " | min_width: Minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.\n", + " | interactive: If True, will allow users to upload and edit an audio file. If False, can only be used to play audio. If not provided, this is inferred based on whether the component is used as an input or output.\n", + " | visible: If False, component will be hidden.\n", + " | streaming: If set to True when used in a `live` interface as an input, will automatically stream webcam feed. When used set as an output, takes audio chunks yield from the backend and combines them into one streaming audio output.\n", + " | elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.\n", + " | elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.\n", + " | render: if False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.\n", + " | key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.\n", + " | format: the file extension with which to save audio files. Either 'wav' or 'mp3'. wav files are lossless but will tend to be larger files. mp3 files tend to be smaller. This parameter applies both when this component is used as an input (and `type` is \"filepath\") to determine which file format to convert user-provided audio to, and when this component is used as an output to determine the format of audio returned to the user. If None, no file format conversion is done and the audio is kept as is. In the case where output audio is returned from the prediction function as numpy array and no `format` is provided, it will be returned as a \"wav\" file.\n", + " | autoplay: Whether to automatically play the audio when the component is used as an output. Note: browsers will not autoplay audio files if the user has not interacted with the page yet.\n", + " | show_download_button: If True, will show a download button in the corner of the component for saving audio. If False, icon does not appear. By default, it will be True for output components and False for input components.\n", + " | show_share_button: If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.\n", + " | editable: If True, allows users to manipulate the audio file if the component is interactive. 
Defaults to True.\n", + " | min_length: The minimum length of audio (in seconds) that the user can pass into the prediction function. If None, there is no minimum length.\n", + " | max_length: The maximum length of audio (in seconds) that the user can pass into the prediction function. If None, there is no maximum length.\n", + " | waveform_options: A dictionary of options for the waveform display. Options include: waveform_color (str), waveform_progress_color (str), show_controls (bool), skip_length (int), trim_region_color (str). Default is None, which uses the default values for these options. [See `gr.WaveformOptions` docs](#waveform-options).\n", + " | loop: If True, the audio will loop when it reaches the end and continue playing from the beginning.\n", + " | recording: If True, the audio component will be set to record audio from the microphone if the source is set to \"microphone\". Defaults to False.\n", + " |\n", + " | change = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 
0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | Triggered when the value of the Audio changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input.\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. 
If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. 
Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | check_streamable(self)\n", + " | Used to check if streaming is supported given the input.\n", + " |\n", + " | clear = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | 
None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the user clears the Audio using the clear button for the component.\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. 
If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. 
Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | async combine_stream(self, stream: 'list[bytes]', desired_output_format: 'str | None' = None, only_file=False) -> 'FileData'\n", + " | Combine all of the stream chunks into a single file.\n", + " |\n", + " | This is needed for downloading the stream and for caching examples.\n", + " | If `only_file` is True, only the FileData corresponding to the file should be returned (needed for downloading the stream).\n", + " | The desired_output_format optionally converts the combined file. Should only be used for cached examples.\n", + " |\n", + " | example_payload(self) -> 'Any'\n", + " | An example input data for this component, e.g. what is passed to this component's preprocess() method.\n", + " | This is used to generate the docs for the View API page for Gradio apps using this component.\n", + " |\n", + " | example_value(self) -> 'Any'\n", + " | An example output data for this component, e.g. 
what is passed to this component's postprocess() method.\n", + " | This is used to generate an example value if this component is used as a template for a custom component.\n", + " |\n", + " | input = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the user changes the value of the Audio.\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. 
If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | pause = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the media in the Audio stops for any reason.\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. 
Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. 
If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | pause_recording = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the user pauses recording with the Audio.\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. 
Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. 
If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | play = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the user plays the media in the Audio.\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. 
Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. 
If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | postprocess(self, value: 'str | Path | bytes | tuple[int, np.ndarray] | None') -> 'FileData | bytes | None'\n", + " | Parameters:\n", + " | value: expects audio data in any of these formats: a `str` or `pathlib.Path` filepath or URL to an audio file, or a `bytes` object (recommended for streaming), or a `tuple` of (sample rate in Hz, audio data as numpy array). Note: if audio is supplied as a numpy array, the audio will be normalized by its peak value to avoid distortion or clipping in the resulting audio.\n", + " | Returns:\n", + " | FileData object, bytes, or None.\n", + " |\n", + " | preprocess(self, payload: 'FileData | None') -> 'str | tuple[int, np.ndarray] | None'\n", + " | Parameters:\n", + " | payload: audio data as a FileData object, or None.\n", + " | Returns:\n", + " | passes audio as one of these formats (depending on `type`): a `str` filepath, or `tuple` of (sample rate in Hz, audio data as numpy array). If the latter, the audio data is a 16-bit `int` array whose values range from -32768 to 32767 and shape of the audio data array is (samples,) for mono audio or (samples, channels) for multi-channel audio.\n", + " |\n", + " | process_example(self, value: 'tuple[int, np.ndarray] | str | Path | bytes | None') -> 'str'\n", + " | Process the input data in a way that can be displayed by the examples dataset component in the front-end.\n", + " | By default, this calls the `.postprocess()` method of the component. However, if the `.postprocess()` method is\n", + " | computationally intensive, or returns a large payload, a custom implementation may be appropriate.\n", + " |\n", + " | For example, the `process_example()` method of the `gr.Audio()` component only returns the name of the file, not\n", + " | the processed audio file. 
The `.process_example()` method of the `gr.Dataframe()` returns the head of a dataframe\n", + " | instead of the full dataframe.\n", + " |\n", + " | The return value of this method must be json-serializable to put in the config.\n", + " |\n", + " | start_recording = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the user starts recording with the Audio.\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). 
Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | stop = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the user reaches the end of the media playing in the Audio.\n", + " | Parameters:\n", + " | fn: the function to call 
when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). 
Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | stop_recording = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the user stops recording with the Audio.\n", + " | Parameters:\n", + " | fn: the function to call when this 
event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). 
Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | stream = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'minimal', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This listener is triggered when the user streams the Audio.\n", + " | Parameters:\n", + " | fn: the function to call when this event is 
triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). 
Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | async stream_output(self, value, output_id: 'str', first_chunk: 'bool') -> 'tuple[MediaStreamChunk | None, FileDataDict]'\n", + " |\n", + " | upload = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | This 
listener is triggered when the user uploads a file into the Audio.\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. 
Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Static methods defined here:\n", + " |\n", + " | async covert_to_adts(data: 'bytes') -> 'tuple[bytes, float]'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes defined here:\n", + " |\n", + " | EVENTS = ['stream', 'change', 'clear', 'play', 'pause', 'stop', 'pause...\n", + " |\n", + " | __abstractmethods__ = frozenset()\n", + " |\n", + " | __annotations__ = {}\n", + " |\n", + " | data_model = \n", + " | The FileData class is a subclass of the GradioModel class that represents a file object within a Gradio interface. 
It is used to store file data and metadata when a file is uploaded.\n", + " |\n", + " | Attributes:\n", + " | path: The server file path where the file is stored.\n", + " | url: The normalized server URL pointing to the file.\n", + " | size: The size of the file in bytes.\n", + " | orig_name: The original filename before upload.\n", + " | mime_type: The MIME type of the file.\n", + " | is_stream: Indicates whether the file is a stream.\n", + " | meta: Additional metadata used internally (should not be changed).\n", + " |\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data descriptors inherited from gradio.components.base.StreamingInput:\n", + " |\n", + " | __dict__\n", + " | dictionary for instance variables\n", + " |\n", + " | __weakref__\n", + " | list of weak references to the object\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Methods inherited from gradio.components.base.Component:\n", + " |\n", + " | api_info(self) -> 'dict[str, Any]'\n", + " | The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description].\n", + " | Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output\n", + " |\n", + " | api_info_as_input(self) -> 'dict[str, Any]'\n", + " |\n", + " | api_info_as_output(self) -> 'dict[str, Any]'\n", + " |\n", + " | as_example(self, value)\n", + " | Deprecated and replaced by `process_example()`.\n", + " |\n", + " | attach_load_event(self, callable: 'Callable', every: 'Timer | float | None', inputs: 'Component | Sequence[Component] | set[Component] | None' = None)\n", + " | Add an event that runs `callable`, optionally at interval specified by `every`.\n", + " |\n", + " | example_inputs(self) -> 'Any'\n", + " | Deprecated and replaced by `example_payload()` and `example_value()`.\n", + " |\n", + " | flag(self, payload: 'Any', flag_dir: 'str | 
Path' = '') -> 'str'\n", + " | Write the component's value to a format that can be stored in a csv or jsonl format for flagging.\n", + " |\n", + " | get_config(self)\n", + " |\n", + " | read_from_flag(self, payload: 'Any')\n", + " | Convert the data from the csv or jsonl file into the component state.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Static methods inherited from gradio.components.base.Component:\n", + " |\n", + " | get_load_fn_and_initial_value(value, inputs=None)\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Readonly properties inherited from gradio.components.base.Component:\n", + " |\n", + " | skip_api\n", + " | Whether this component should be skipped from the api return value\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes inherited from gradio.components.base.Component:\n", + " |\n", + " | FRONTEND_DIR = '../../frontend/'\n", + " |\n", + " | TEMPLATE_DIR = './templates/'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Class methods inherited from gradio.components.base.ComponentBase:\n", + " |\n", + " | get_component_class_id() -> 'str'\n", + " |\n", + " | has_event(event: 'str | EventListener') -> 'bool'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Methods inherited from gradio.blocks.Block:\n", + " |\n", + " | async async_move_resource_to_block_cache(self, url_or_file_path: 'str | Path | None') -> 'str | None'\n", + " | Moves a file or downloads a file from a url to a block's cache directory, adds\n", + " | to to the block's temp_files, and returns the path to the file in cache. 
This\n", + " | ensures that the file is accessible to the Block and can be served to users.\n", + " |\n", + " | This async version of the function is used when this is being called within\n", + " | a FastAPI route, as this is not blocking.\n", + " |\n", + " | get_block_class(self) -> 'str'\n", + " | Gets block's class name. If it is template component it gets the parent's class name.\n", + " | Very similar to the get_block_name method, but this method is used to reconstruct a\n", + " | Gradio app that is loaded from a Space using gr.load(). This should generally\n", + " | NOT be overridden.\n", + " |\n", + " | get_block_name(self) -> 'str'\n", + " | Gets block's class name. If it is template component it gets the parent's class name.\n", + " | This is used to identify the Svelte file to use in the frontend. Override this method\n", + " | if a component should use a different Svelte file than the default naming convention.\n", + " |\n", + " | get_expected_parent(self) -> 'type[BlockContext] | None'\n", + " |\n", + " | move_resource_to_block_cache(self, url_or_file_path: 'str | Path | None') -> 'str | None'\n", + " | Moves a file or downloads a file from a url to a block's cache directory, adds\n", + " | to to the block's temp_files, and returns the path to the file in cache. This\n", + " | ensures that the file is accessible to the Block and can be served to users.\n", + " |\n", + " | This sync version of the function is used when this is being called outside of\n", + " | a FastAPI route, e.g. when examples are being cached.\n", + " |\n", + " | render(self)\n", + " | Adds self into appropriate BlockContext\n", + " |\n", + " | serve_static_file(self, url_or_file_path: 'str | Path | dict | None') -> 'dict | None'\n", + " | If a file is a local file, moves it to the block's cache directory and returns\n", + " | a FileData-type dictionary corresponding to the file. If the file is a URL, returns a\n", + " | FileData-type dictionary corresponding to the URL. 
This ensures that the file is\n", + " | accessible in the frontend and can be served to users.\n", + " |\n", + " | Examples:\n", + " | >>> block.serve_static_file(\"https://gradio.app/logo.png\") -> {\"path\": \"https://gradio.app/logo.png\", \"url\": \"https://gradio.app/logo.png\"}\n", + " | >>> block.serve_static_file(\"logo.png\") -> {\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}\n", + " | >>> block.serve_static_file({\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}) -> {\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}\n", + " |\n", + " | unrender(self)\n", + " | Removes self from BlockContext if it has been rendered (otherwise does nothing).\n", + " | Removes self from the layout and collection of blocks, but does not delete any event triggers.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Class methods inherited from gradio.blocks.Block:\n", + " |\n", + " | recover_kwargs(props: 'dict[str, Any]', additional_keys: 'list[str] | None' = None)\n", + " | Recovers kwargs from a dict of props.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Readonly properties inherited from gradio.blocks.Block:\n", + " |\n", + " | constructor_args\n", + " | Get the arguments passed to the component's initializer.\n", + " |\n", + " | Only set classes whose metaclass is ComponentMeta\n", + " |\n", + " | events\n", + " |\n", + " | stateful\n", + "\n", + " class BarPlot(NativePlot)\n", + " | BarPlot(value: 'pd.DataFrame | Callable | None' = None, x: 'str | None' = None, y: 'str | None' = None, *, color: 'str | None' = None, title: 'str | None' = None, x_title: 'str | None' = None, y_title: 'str | None' = None, color_title: 'str | None' = None, x_bin: 'str | float | None' = None, y_aggregate: \"Literal['sum', 'mean', 'median', 'min', 'max', 'count'] | None\" = None, color_map: 'dict[str, str] | None' = None, x_lim: 'list[float] | None' = None, y_lim: 'list[float] | 
None' = None, x_label_angle: 'float' = 0, y_label_angle: 'float' = 0, x_axis_labels_visible: 'bool' = True, caption: 'str | None' = None, sort: \"Literal['x', 'y', '-x', '-y'] | list[str] | None\" = None, tooltip: \"Literal['axis', 'none', 'all'] | list[str]\" = 'axis', height: 'int | None' = None, label: 'str | None' = None, show_label: 'bool | None' = None, container: 'bool' = True, scale: 'int | None' = None, min_width: 'int' = 160, every: 'Timer | float | None' = None, inputs: 'Component | Sequence[Component] | Set[Component] | None' = None, visible: 'bool' = True, elem_id: 'str | None' = None, elem_classes: 'list[str] | str | None' = None, render: 'bool' = True, key: 'int | str | None' = None, **kwargs)\n", + " |\n", + " | Creates a bar plot component to display data from a pandas DataFrame.\n", + " |\n", + " | Demos: bar_plot_demo\n", + " |\n", + " | Method resolution order:\n", + " | BarPlot\n", + " | NativePlot\n", + " | gradio.components.base.Component\n", + " | gradio.components.base.ComponentBase\n", + " | abc.ABC\n", + " | gradio.blocks.Block\n", + " | builtins.object\n", + " |\n", + " | Methods defined here:\n", + " |\n", + " | get_block_name(self) -> 'str'\n", + " | Gets block's class name. If it is template component it gets the parent's class name.\n", + " | This is used to identify the Svelte file to use in the frontend. 
Override this method\n", + " | if a component should use a different Svelte file than the default naming convention.\n", + " |\n", + " | get_mark(self) -> 'str'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes defined here:\n", + " |\n", + " | __abstractmethods__ = frozenset()\n", + " |\n", + " | __annotations__ = {}\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Methods inherited from NativePlot:\n", + " |\n", + " | __init__(self, value: 'pd.DataFrame | Callable | None' = None, x: 'str | None' = None, y: 'str | None' = None, *, color: 'str | None' = None, title: 'str | None' = None, x_title: 'str | None' = None, y_title: 'str | None' = None, color_title: 'str | None' = None, x_bin: 'str | float | None' = None, y_aggregate: \"Literal['sum', 'mean', 'median', 'min', 'max', 'count'] | None\" = None, color_map: 'dict[str, str] | None' = None, x_lim: 'list[float] | None' = None, y_lim: 'list[float] | None' = None, x_label_angle: 'float' = 0, y_label_angle: 'float' = 0, x_axis_labels_visible: 'bool' = True, caption: 'str | None' = None, sort: \"Literal['x', 'y', '-x', '-y'] | list[str] | None\" = None, tooltip: \"Literal['axis', 'none', 'all'] | list[str]\" = 'axis', height: 'int | None' = None, label: 'str | None' = None, show_label: 'bool | None' = None, container: 'bool' = True, scale: 'int | None' = None, min_width: 'int' = 160, every: 'Timer | float | None' = None, inputs: 'Component | Sequence[Component] | Set[Component] | None' = None, visible: 'bool' = True, elem_id: 'str | None' = None, elem_classes: 'list[str] | str | None' = None, render: 'bool' = True, key: 'int | str | None' = None, **kwargs)\n", + " | Parameters:\n", + " | value: The pandas dataframe containing the data to display in the plot.\n", + " | x: Column corresponding to the x axis. 
Column can be numeric, datetime, or string/category.\n", + " | y: Column corresponding to the y axis. Column must be numeric.\n", + " | color: Column corresponding to series, visualized by color. Column must be string/category.\n", + " | title: The title to display on top of the chart.\n", + " | x_title: The title given to the x axis. By default, uses the value of the x parameter.\n", + " | y_title: The title given to the y axis. By default, uses the value of the y parameter.\n", + " | color_title: The title given to the color legend. By default, uses the value of color parameter.\n", + " | x_bin: Grouping used to cluster x values. If x column is numeric, should be number to bin the x values. If x column is datetime, should be string such as \"1h\", \"15m\", \"10s\", using \"s\", \"m\", \"h\", \"d\" suffixes.\n", + " | y_aggregate: Aggregation function used to aggregate y values, used if x_bin is provided or x is a string/category. Must be one of \"sum\", \"mean\", \"median\", \"min\", \"max\".\n", + " | color_map: Mapping of series to color names or codes. For example, {\"success\": \"green\", \"fail\": \"#FF8888\"}.\n", + " | height: The height of the plot in pixels.\n", + " | x_lim: A tuple or list containing the limits for the x-axis, specified as [x_min, x_max]. If x column is datetime type, x_lim should be timestamps.\n", + " | y_lim: A tuple of list containing the limits for the y-axis, specified as [y_min, y_max].\n", + " | x_label_angle: The angle of the x-axis labels in degrees offset clockwise.\n", + " | y_label_angle: The angle of the y-axis labels in degrees offset clockwise.\n", + " | x_axis_labels_visible: Whether the x-axis labels should be visible. Can be hidden when many x-axis labels are present.\n", + " | caption: The (optional) caption to display below the plot.\n", + " | sort: The sorting order of the x values, if x column is type string/category. 
Can be \"x\", \"y\", \"-x\", \"-y\", or list of strings that represent the order of the categories.\n", + " | tooltip: The tooltip to display when hovering on a point. \"axis\" shows the values for the axis columns, \"all\" shows all column values, and \"none\" shows no tooltips. Can also provide a list of strings representing columns to show in the tooltip, which will be displayed along with axis values.\n", + " | height: The height of the plot in pixels.\n", + " | label: The (optional) label to display on the top left corner of the plot.\n", + " | show_label: Whether the label should be displayed.\n", + " | container: If True, will place the component in a container - providing some extra padding around the border.\n", + " | scale: relative size compared to adjacent Components. For example if Components A and B are in a Row, and A has scale=2, and B has scale=1, A will be twice as wide as B. Should be an integer. scale applies in Rows, and to top-level Components in Blocks where fill_height=True.\n", + " | min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.\n", + " | every: Continously calls `value` to recalculate it if `value` is a function (has no effect otherwise). Can provide a Timer whose tick resets `value`, or a float that provides the regular interval for the reset Timer.\n", + " | inputs: Components that are used as inputs to calculate `value` if `value` is a function (has no effect otherwise). `value` is recalculated any time the inputs change.\n", + " | visible: Whether the plot should be visible.\n", + " | elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.\n", + " | elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.\n", + " | render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later.\n", + " | key: if assigned, will be used to assume identity across a re-render. Components that have the same key across a re-render will have their value preserved.\n", + " |\n", + " | api_info(self) -> 'dict[str, Any]'\n", + " | The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description].\n", + " | Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output\n", + " |\n", + " | double_click = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | Triggered when the NativePlot is double clicked.\n", + " | Parameters:\n", + " | fn: the function to call when this 
event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). 
Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | example_payload(self) -> 'Any'\n", + " | An example input data for this component, e.g. what is passed to this component's preprocess() method.\n", + " | This is used to generate the docs for the View API page for Gradio apps using this component.\n", + " |\n", + " | example_value(self) -> 'Any'\n", + " | An example output data for this component, e.g. what is passed to this component's postprocess() method.\n", + " | This is used to generate an example value if this component is used as a template for a custom component.\n", + " |\n", + " | postprocess(self, value: 'pd.DataFrame | dict | None') -> 'PlotData | None'\n", + " | Parameters:\n", + " | value: Expects a pandas DataFrame containing the data to display in the line plot. 
The DataFrame should contain at least two columns, one for the x-axis (corresponding to this component's `x` argument) and one for the y-axis (corresponding to `y`).\n", + " | Returns:\n", + " | The data to display in a line plot, in the form of an AltairPlotData dataclass, which includes the plot information as a JSON string, as well as the type of plot (in this case, \"line\").\n", + " |\n", + " | preprocess(self, payload: 'PlotData | None') -> 'PlotData | None'\n", + " | Parameters:\n", + " | payload: The data to display in a line plot.\n", + " | Returns:\n", + " | The data to display in a line plot.\n", + " |\n", + " | select = event_trigger(block: 'Block | None', fn: \"Callable | None | Literal['decorator']\" = 'decorator', inputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, outputs: 'Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None' = None, api_name: 'str | None | Literal[False]' = None, scroll_to_output: 'bool' = False, show_progress: \"Literal['full', 'minimal', 'hidden']\" = 'full', show_progress_on: 'Component | Sequence[Component] | None' = None, queue: 'bool' = True, batch: 'bool' = False, max_batch_size: 'int' = 4, preprocess: 'bool' = True, postprocess: 'bool' = True, cancels: 'dict[str, Any] | list[dict[str, Any]] | None' = None, trigger_mode: \"Literal['once', 'multiple', 'always_last'] | None\" = None, js: 'str | Literal[True] | None' = None, concurrency_limit: \"int | None | Literal['default']\" = 'default', concurrency_id: 'str | None' = None, show_api: 'bool' = True, time_limit: 'int | None' = None, stream_every: 'float' = 0.5, like_user_message: 'bool' = False) -> 'Dependency' from gradio.events.EventListener._setup.\n", + " | Event listener for when the user selects or deselects the NativePlot. 
Uses event data gradio.SelectData to carry `value` referring to the label of the NativePlot, and `selected` to refer to state of the NativePlot. See EventData documentation on how to use this event data\n", + " | Parameters:\n", + " | fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.\n", + " | inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.\n", + " | outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.\n", + " | api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event.\n", + " | scroll_to_output: If True, will scroll to output component on completion\n", + " | show_progress: how to show the progress animation while event is running: \"full\" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, \"minimal\" only shows the runtime display, \"hidden\" shows no progress animation at all\n", + " | show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components.\n", + " | queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.\n", + " | batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.\n", + " | max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)\n", + " | preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).\n", + " | postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.\n", + " | cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.\n", + " | trigger_mode: If \"once\" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to \"multiple\", unlimited submissions are allowed while pending, and \"always_last\" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete.\n", + " | js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.\n", + " | concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. 
Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to \"default\" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `Blocks.queue()`, which itself is 1 by default).\n", + " | concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit.\n", + " | show_api: whether to show this event in the \"view API\" page of the Gradio app, or in the \".view_api()\" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes inherited from NativePlot:\n", + " |\n", + " | EVENTS = ['select', 'double_click']\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Methods inherited from gradio.components.base.Component:\n", + " |\n", + " | api_info_as_input(self) -> 'dict[str, Any]'\n", + " |\n", + " | api_info_as_output(self) -> 'dict[str, Any]'\n", + " |\n", + " | as_example(self, value)\n", + " | Deprecated and replaced by `process_example()`.\n", + " |\n", + " | attach_load_event(self, callable: 'Callable', every: 'Timer | float | None', inputs: 'Component | Sequence[Component] | set[Component] | None' = None)\n", + " | Add an event that runs `callable`, optionally at interval specified by `every`.\n", + " |\n", + " | example_inputs(self) -> 'Any'\n", + " | Deprecated and replaced by `example_payload()` and `example_value()`.\n", + " |\n", + " | flag(self, payload: 'Any', flag_dir: 'str | Path' = '') -> 'str'\n", + " | Write the component's value to a format that can be stored in a csv or jsonl format for flagging.\n", + " |\n", + " | get_config(self)\n", + 
" |\n", + " | process_example(self, value)\n", + " | Process the input data in a way that can be displayed by the examples dataset component in the front-end.\n", + " | By default, this calls the `.postprocess()` method of the component. However, if the `.postprocess()` method is\n", + " | computationally intensive, or returns a large payload, a custom implementation may be appropriate.\n", + " |\n", + " | For example, the `process_example()` method of the `gr.Audio()` component only returns the name of the file, not\n", + " | the processed audio file. The `.process_example()` method of the `gr.Dataframe()` returns the head of a dataframe\n", + " | instead of the full dataframe.\n", + " |\n", + " | The return value of this method must be json-serializable to put in the config.\n", + " |\n", + " | read_from_flag(self, payload: 'Any')\n", + " | Convert the data from the csv or jsonl file into the component state.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Static methods inherited from gradio.components.base.Component:\n", + " |\n", + " | get_load_fn_and_initial_value(value, inputs=None)\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Readonly properties inherited from gradio.components.base.Component:\n", + " |\n", + " | skip_api\n", + " | Whether this component should be skipped from the api return value\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data and other attributes inherited from gradio.components.base.Component:\n", + " |\n", + " | FRONTEND_DIR = '../../frontend/'\n", + " |\n", + " | TEMPLATE_DIR = './templates/'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Class methods inherited from gradio.components.base.ComponentBase:\n", + " |\n", + " | get_component_class_id() -> 'str'\n", + " |\n", + " | has_event(event: 'str | EventListener') -> 
'bool'\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Data descriptors inherited from gradio.components.base.ComponentBase:\n", + " |\n", + " | __dict__\n", + " | dictionary for instance variables\n", + " |\n", + " | __weakref__\n", + " | list of weak references to the object\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Methods inherited from gradio.blocks.Block:\n", + " |\n", + " | async async_move_resource_to_block_cache(self, url_or_file_path: 'str | Path | None') -> 'str | None'\n", + " | Moves a file or downloads a file from a url to a block's cache directory, adds\n", + " | to to the block's temp_files, and returns the path to the file in cache. This\n", + " | ensures that the file is accessible to the Block and can be served to users.\n", + " |\n", + " | This async version of the function is used when this is being called within\n", + " | a FastAPI route, as this is not blocking.\n", + " |\n", + " | get_block_class(self) -> 'str'\n", + " | Gets block's class name. If it is template component it gets the parent's class name.\n", + " | Very similar to the get_block_name method, but this method is used to reconstruct a\n", + " | Gradio app that is loaded from a Space using gr.load(). This should generally\n", + " | NOT be overridden.\n", + " |\n", + " | get_expected_parent(self) -> 'type[BlockContext] | None'\n", + " |\n", + " | move_resource_to_block_cache(self, url_or_file_path: 'str | Path | None') -> 'str | None'\n", + " | Moves a file or downloads a file from a url to a block's cache directory, adds\n", + " | to to the block's temp_files, and returns the path to the file in cache. This\n", + " | ensures that the file is accessible to the Block and can be served to users.\n", + " |\n", + " | This sync version of the function is used when this is being called outside of\n", + " | a FastAPI route, e.g. 
when examples are being cached.\n", + " |\n", + " | render(self)\n", + " | Adds self into appropriate BlockContext\n", + " |\n", + " | serve_static_file(self, url_or_file_path: 'str | Path | dict | None') -> 'dict | None'\n", + " | If a file is a local file, moves it to the block's cache directory and returns\n", + " | a FileData-type dictionary corresponding to the file. If the file is a URL, returns a\n", + " | FileData-type dictionary corresponding to the URL. This ensures that the file is\n", + " | accessible in the frontend and can be served to users.\n", + " |\n", + " | Examples:\n", + " | >>> block.serve_static_file(\"https://gradio.app/logo.png\") -> {\"path\": \"https://gradio.app/logo.png\", \"url\": \"https://gradio.app/logo.png\"}\n", + " | >>> block.serve_static_file(\"logo.png\") -> {\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}\n", + " | >>> block.serve_static_file({\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}) -> {\"path\": \"logo.png\", \"url\": \"/file=logo.png\"}\n", + " |\n", + " | unrender(self)\n", + " | Removes self from BlockContext if it has been rendered (otherwise does nothing).\n", + " | Removes self from the layout and collection of blocks, but does not delete any event triggers.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Class methods inherited from gradio.blocks.Block:\n", + " |\n", + " | recover_kwargs(props: 'dict[str, Any]', additional_keys: 'list[str] | None' = None)\n", + " | Recovers kwargs from a dict of props.\n", + " |\n", + " | ----------------------------------------------------------------------\n", + " | Readonly properties inherited from gradio.blocks.Block:\n", + " |\n", + " | constructor_args\n", + " | Get the arguments passed to the component's initializer.\n", + " |\n", + " | Only set classes whose metaclass is ComponentMeta\n", + " |\n", + " | events\n", + " |\n", + " | stateful\n", + "\n", + " class Blocks(BlockContext, 
gradio.blocks_events.BlocksEvents)\n", + " | Blocks(theme: 'Theme | str | None' = None, analytics_enabled: 'bool | None' = None, mode: 'str' = 'blocks', title: 'str' = 'Gradio', css: 'str | None' = None, css_paths: 'str | Path | Sequence[str | Path] | None' = None, js: 'str | Literal[True] | None' = None, head: 'str | None' = None, head_paths: 'str | Path | Sequence[str | Path] | None' = None, fill_height: 'bool' = False, fill_width: 'bool' = False, delete_cache: 'tuple[int, int] | None' = None, **kwargs)\n", + " |\n", + " | Blocks is Gradio's low-level API that allows you to create more custom web\n", + " | applications and demos than Interfaces (yet still entirely in Python).\n", + " |\n", + " |\n", + " | Compared to the Interface class, Blocks offers more flexibility and control over:\n", + " | (1) the layout of components (2) the events that\n", + " | trigger the execution of functions (3) data flows (e.g. inputs can trigger outputs,\n", + " | which can trigger the next level of outputs). Blocks also offers ways to group\n", + " | together related demos such as with tabs.\n", + " |\n", + " |\n", + " | The basic usage of Blocks is as follows: create a Blocks object, then use it as a\n", + " | context (with the \"with\" statement), and then define layouts, components, or events\n", + " | within the Blocks context. 
Finally, call the launch() method to launch the demo.\n", + " |\n", + " | Example:\n", + " | import gradio as gr\n", + " | def update(name):\n", + " | return f\"Welcome to Gradio, {name}!\"\n", + " |\n", + " | with gr.Blocks() as demo:\n", + " | gr.Markdown(\"Start typing below and then click **Run** to see the output.\")\n", + " | with gr.Row():\n", + " | inp = gr.Textbox(placeholder=\"What is your name?\")\n", + " | out = gr.Textbox()\n", + " | btn = gr.Button(\"Run\")\n", + " | btn.click(fn=update, inputs=inp, outputs=out)\n", + " |\n", + " | demo.launch()\n", + " | Demos: blocks_hello, blocks_flipper, blocks_kinematics\n", + " | Guides: blocks-and-event-listeners, controlling-layout, state-in-blocks, custom-CSS-and-JS, using-blocks-like-functions\n", + " |\n", + " | Method resolution order:\n", + " | Blocks\n", + " | BlockContext\n", + " | Block\n", + " | gradio.blocks_events.BlocksEvents\n", + " | builtins.object\n", + " |\n", + " | Methods defined here:\n", + " |\n", + " | __call__(self, *inputs, fn_index: 'int' = 0, api_name: 'str | None' = None)\n", + " | Allows Blocks objects to be called as functions. Supply the parameters to the\n", + " | function as positional arguments. To choose which function to call, use the\n", + " | fn_index parameter, which must be a keyword argument.\n", + " |\n", + " | Parameters:\n", + " | *inputs: the parameters to pass to the function\n", + " | fn_index: the index of the function to call (defaults to 0, which for Interfaces, is the default prediction function)\n", + " | api_name: The api_name of the dependency to call. 
Will take precedence over fn_index.\n", + " |\n", + " | __enter__(self)\n", + " |\n", + " | __exit__(self, exc_type: 'type[BaseException] | None' = None, *args)\n", + " |\n", + " | __init__(self, theme: 'Theme | str | None' = None, analytics_enabled: 'bool | None' = None, mode: 'str' = 'blocks', title: 'str' = 'Gradio', css: 'str | None' = None, css_paths: 'str | Path | Sequence[str | Path] | None' = None, js: 'str | Literal[True] | None' = None, head: 'str | None' = None, head_paths: 'str | Path | Sequence[str | Path] | None' = None, fill_height: 'bool' = False, fill_width: 'bool' = False, delete_cache: 'tuple[int, int] | None' = None, **kwargs)\n", + " | Parameters:\n", + " | theme: A Theme object or a string representing a theme. If a string, will look for a built-in theme with that name (e.g. \"soft\" or \"default\"), or will attempt to load a theme from the Hugging Face Hub (e.g. \"gradio/monochrome\"). If None, will use the Default theme.\n", + " | analytics_enabled: Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable or default to True.\n", + " | mode: A human-friendly name for the kind of Blocks or Interface being created. Used internally for analytics.\n", + " | title: The tab title to display when this is opened in a browser window.\n", + " | css: Custom css as a code string. This css will be included in the demo webpage.\n", + " | css_paths: Custom css as a pathlib.Path to a css file or a list of such paths. This css files will be read, concatenated, and included in the demo webpage. If the `css` parameter is also set, the css from `css` will be included first.\n", + " | js: Custom js as a code string. The custom js should be in the form of a single js function. This function will automatically be executed when the page loads. For more flexibility, use the head parameter to insert js inside