{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# 🐝 Hugging Face Space Creator Agent\n", "\n", "Run this notebook to launch the Agent Creator. This tool allows you to convert code to Gradio apps and deploy them to Hugging Face Spaces." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# @title 1. Install Dependencies\n", "!pip install -q gradio langchain-groq huggingface_hub python-dotenv nbconvert beautifulsoup4" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# @title 2. Configuration\n", "import os\n", "from google.colab import userdata\n", "\n", "# Try to get from Colab Secrets, otherwise verify env\n", "try:\n", " os.environ[\"GROQ_API_KEY\"] = userdata.get('GROQ_API_KEY')\n", "except (ImportError, AttributeError, Exception):\n", " pass\n", "\n", "if not os.environ.get(\"GROQ_API_KEY\"):\n", " print(\"⚠️ GROQ_API_KEY not found in secrets. Please set it manually below or rely on the UI input if we implemented that (code currently expects env var).\")\n", " key = input(\"Enter your GROQ_API_KEY: \")\n", " os.environ[\"GROQ_API_KEY\"] = key" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# @title 3. 
Define Backend Logic\n",
   "\n",
   "import os\n",
   "from langchain_groq import ChatGroq\n",
   "from langchain_core.prompts import ChatPromptTemplate\n",
   "from langchain_core.output_parsers import JsonOutputParser\n",
   "from typing import Dict, Any, Optional\n",
   "import json\n",
   "from huggingface_hub import HfApi, create_repo\n",
   "import zipfile\n",
   "import io\n",
   "\n",
   "# --- Converter Logic ---\n",
   "def convert_code(input_code: str, model_name: str = \"llama-3.3-70b-versatile\") -> Dict[str, str]:\n",
   "    \"\"\"\n",
   "    Converts input Python/IPYNB code into a Gradio app structure.\n",
   "    Returns a dictionary containing app.py, requirements.txt, and README.md content.\n",
   "    Raises ValueError if GROQ_API_KEY is unset, RuntimeError if generation fails.\n",
   "    \"\"\"\n",
   "\n",
   "    # Fail fast with a clear message instead of deep inside the LLM client.\n",
   "    if not os.environ.get(\"GROQ_API_KEY\"):\n",
   "        raise ValueError(\"GROQ_API_KEY environment variable is not set.\")\n",
   "\n",
   "    chat = ChatGroq(temperature=0, model_name=model_name)\n",
   "\n",
   "    system_prompt = \"\"\"You are an expert Python developer specializing in Gradio and Hugging Face Spaces. \n",
   "    Your task is to convert the provided Python code (which might be a script or a notebook content) into a deployable Gradio web application.\n",
   "    \n",
   "    You must output a JSON object with exactly three keys:\n",
   "    1. \"app_py\": The complete code for app.py. It must use Gradio to create a UI for the functionality in the source code. Ensure all imports are correct.\n",
   "    2. \"requirements_txt\": The plain-text contents of requirements.txt, one dependency per line. Include 'gradio'.\n",
   "    3. \"readme_md\": A README.md file customized for a Hugging Face Space.\n",
   "    \n",
   "    Rules for app.py:\n",
   "    - Encapsulate logic in functions.\n",
   "    - Create a professional Gradio interface `demo = gr.Interface(...)` or `with gr.Blocks() as demo: ...`.\n",
   "    - Ensure `demo.launch()` is called at the end if it's main, but standard HF spaces just look for `demo` object or run the script.\n",
   "    - Handle potential errors gracefully.\n",
   "    \n",
   "    Do not include markdown triple backticks in the JSON values. The values should be raw string content.\n",
   "    \"\"\"\n",
   "\n",
   "    human_template = \"Convert this code into a Gradio app:\\n\\n{code}\"\n",
   "\n",
   "    prompt = ChatPromptTemplate.from_messages([\n",
   "        (\"system\", system_prompt),\n",
   "        (\"human\", human_template)\n",
   "    ])\n",
   "\n",
   "    chain = prompt | chat | JsonOutputParser()\n",
   "\n",
   "    try:\n",
   "        result = chain.invoke({\"code\": input_code})\n",
   "        # Models sometimes return requirements as a JSON array despite the\n",
   "        # prompt; normalize to newline-separated text so it writes to a file.\n",
   "        reqs = result.get(\"requirements_txt\", \"gradio\")\n",
   "        if isinstance(reqs, list):\n",
   "            result[\"requirements_txt\"] = \"\\n\".join(str(r) for r in reqs)\n",
   "        return result\n",
   "    except Exception as e:\n",
   "        raise RuntimeError(f\"Failed to generate code: {str(e)}\")\n",
   "\n",
   "def parse_notebook(notebook_content: dict) -> str:\n",
   "    \"\"\"Extracts code cells from a notebook dictionary.\"\"\"\n",
   "    code = []\n",
   "    for cell in notebook_content.get('cells', []):\n",
   "        if cell.get('cell_type') == 'code':\n",
   "            code.append(\"\".join(cell.get('source', [])))\n",
   "    return \"\\n\\n\".join(code)\n",
   "\n",
   "# --- Deployer Logic ---\n",
   "def deploy_to_space(\n",
   "    token: str,\n",
   "    space_name: str,\n",
   "    files: Dict[str, str],\n",
   "    username: Optional[str] = None\n",
   ") -> str:\n",
   "    \"\"\"\n",
   "    Deploys the given files to a Hugging Face Space.\n",
   "    Returns the URL of the deployed space.\n",
   "    Raises ValueError for a missing/invalid token, RuntimeError on upload failure.\n",
   "    \"\"\"\n",
   "    if not token:\n",
   "        raise ValueError(\"Hugging Face Token is required.\")\n",
   "\n",
   "    api = HfApi(token=token)\n",
   "\n",
   "    # Authenticate and resolve the username from the token when not supplied.\n",
   "    try:\n",
   "        user_info = api.whoami()\n",
   "        if not username:\n",
   "            username = user_info['name']\n",
   "    except Exception as e:\n",
   "        raise ValueError(f\"Invalid Token: {str(e)}\")\n",
   "\n",
   "    repo_id = f\"{username}/{space_name}\"\n",
   "\n",
   "    # Create the Space if it does not exist (exist_ok makes this idempotent).\n",
   "    try:\n",
   "        api.create_repo(\n",
   "            repo_id=repo_id,\n",
   "            repo_type=\"space\",\n",
   "            space_sdk=\"gradio\",\n",
   "            exist_ok=True,\n",
   "            private=False  # Default to public, can be changed\n",
   "        )\n",
   "    except Exception as e:\n",
   "        print(f\"Repo creation/check status: {e}\")\n",
   "\n",
   "    try:\n",
   "        for filename, content in files.items():\n",
   "            content_bytes = content.encode('utf-8')\n",
   "            api.upload_file(\n",
   "                path_or_fileobj=content_bytes,\n",
   "                path_in_repo=filename,\n",
   "                repo_id=repo_id,\n",
   "                repo_type=\"space\",\n",
   "                commit_message=f\"Update {filename} via Agent\"\n",
   "            )\n",
   "\n",
   "        return f\"https://huggingface.co/spaces/{repo_id}\"\n",
   "\n",
   "    except Exception as e:\n",
   "        raise RuntimeError(f\"Deployment Failed: {str(e)}\")\n",
   "\n",
   "# --- Utils ---\n",
   "def create_zip_archive(files: dict) -> bytes:\n",
   "    \"\"\"Creates a zip archive in memory from a dictionary of filename: content.\"\"\"\n",
   "    zip_buffer = io.BytesIO()\n",
   "    with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n",
   "        for filename, content in files.items():\n",
   "            zip_file.writestr(filename, content)\n",
   "    zip_buffer.seek(0)\n",
   "    return zip_buffer.getvalue()\n",
   "\n",
   "def extract_code_from_ipynb(ipynb_content: str) -> str:\n",
   "    \"\"\"Extracts code cells from ipynb json string.\"\"\"\n",
   "    try:\n",
   "        data = json.loads(ipynb_content)\n",
   "        code_cells = []\n",
   "        for cell in data.get('cells', []):\n",
   "            if cell.get('cell_type') == 'code':\n",
   "                source = cell.get('source', [])\n",
   "                # nbformat allows 'source' to be a list of lines or a single string.\n",
   "                if isinstance(source, list):\n",
   "                    code_cells.append(''.join(source))\n",
   "                elif isinstance(source, str):\n",
   "                    code_cells.append(source)\n",
   "        return '\\n\\n'.join(code_cells)\n",
   "    except json.JSONDecodeError:\n",
   "        # Not JSON at all — treat the input as plain Python source.\n",
   "        return ipynb_content" ] },
 { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# @title 4. 
Launch Application\n",
   "import gradio as gr\n",
   "import tempfile\n",
   "\n",
   "# Custom CSS\n",
   "custom_css = \"\"\"\n",
   ":root {\n",
   " --primary-color: #FFD700; /* Gold/Yellow */\n",
   " --secondary-color: #1a1a1a; /* Dark Gray/Black */\n",
   " --text-color: #333333;\n",
   " --bg-color: #f4f4f4;\n",
   "}\n",
   "body { background-color: var(--bg-color); font-family: 'Roboto', sans-serif; }\n",
   "gradio-app { background-color: var(--bg-color) !important; }\n",
   ".gradio-container { max-width: 1200px !important; }\n",
   "button.primary { \n",
   " background-color: var(--primary-color) !important; \n",
   " color: var(--secondary-color) !important; \n",
   " font-weight: bold !important; \n",
   " border: none !important; \n",
   "}\n",
   "\"\"\"\n",
   "\n",
   "def process_and_deploy(input_text, input_file, hf_token, space_name, deploy_mode):\n",
   "    \"\"\"Gradio callback: read source, convert via LLM, zip, optionally deploy.\n",
   "\n",
   "    Returns a (zip_path_or_None, status_markdown) tuple for the two outputs.\n",
   "    \"\"\"\n",
   "\n",
   "    def _as_text(value):\n",
   "        # The LLM may hand back lists (e.g. requirements); flatten to text\n",
   "        # so zipping and uploading (which encode strings) cannot crash.\n",
   "        if isinstance(value, list):\n",
   "            return \"\\n\".join(str(v) for v in value)\n",
   "        return str(value)\n",
   "\n",
   "    try:\n",
   "        # 1. Get Source Code (uploaded file wins over pasted text)\n",
   "        code_content = \"\"\n",
   "        if input_file is not None:\n",
   "            with open(input_file.name, 'r', encoding='utf-8') as f:\n",
   "                content = f.read()\n",
   "            if input_file.name.endswith('.ipynb'):\n",
   "                code_content = extract_code_from_ipynb(content)\n",
   "            else:\n",
   "                code_content = content\n",
   "        elif input_text and input_text.strip():  # gr.Code may pass None\n",
   "            code_content = input_text\n",
   "        else:\n",
   "            return None, \"Please provide either a file or paste code.\"\n",
   "\n",
   "        if not code_content.strip():\n",
   "            return None, \"No code found to convert.\"\n",
   "\n",
   "        # 2. Convert\n",
   "        try:\n",
   "            conversion_result = convert_code(code_content)\n",
   "        except Exception as e:\n",
   "            return None, f\"AI Conversion Failed: {str(e)}\"\n",
   "\n",
   "        # .get with defaults: a malformed LLM response must not raise KeyError.\n",
   "        files_dict = {\n",
   "            \"app.py\": _as_text(conversion_result.get(\"app_py\", \"\")),\n",
   "            \"requirements.txt\": _as_text(conversion_result.get(\"requirements_txt\", \"gradio\")),\n",
   "            \"README.md\": _as_text(conversion_result.get(\"readme_md\", \"\"))\n",
   "        }\n",
   "\n",
   "        # 3. Zip\n",
   "        zip_bytes = create_zip_archive(files_dict)\n",
   "        temp_dir = tempfile.mkdtemp()\n",
   "        zip_path = os.path.join(temp_dir, \"huggingface_space_files.zip\")\n",
   "        with open(zip_path, \"wb\") as f:\n",
   "            f.write(zip_bytes)\n",
   "\n",
   "        status_msg = \"Conversion Successful! Download the zip below.\"\n",
   "\n",
   "        # 4. Deploy\n",
   "        if deploy_mode == \"Convert & Deploy to HF Space\":\n",
   "            if not hf_token or not space_name:\n",
   "                status_msg += \"\\n\\nDeployment Skipped: Missing HF Token or Space Name.\"\n",
   "            else:\n",
   "                try:\n",
   "                    deploy_url = deploy_to_space(hf_token, space_name, files_dict)\n",
   "                    status_msg += f\"\\n\\nSuccessfully Deployed to: {deploy_url}\"\n",
   "                except Exception as e:\n",
   "                    status_msg += f\"\\n\\nDeployment Failed: {str(e)}\"\n",
   "\n",
   "        return zip_path, status_msg\n",
   "\n",
   "    except Exception as e:\n",
   "        return None, f\"An unexpected error occurred: {str(e)}\"\n",
   "\n",
   "with gr.Blocks(css=custom_css, title=\"HF Agent Creator\") as demo:\n",
   "    with gr.Row():\n",
   "        gr.Markdown(\"# 🐝 Hugging Face Space Creator Agent\")\n",
   "\n",
   "    gr.Markdown(\"Transform your local Python scripts or Jupyter Notebooks into ready-to-deploy Hugging Face Spaces instantly.\")\n",
   "\n",
   "    with gr.Row():\n",
   "        with gr.Column(scale=1):\n",
   "            gr.Markdown(\"### 1. Source Code\")\n",
   "            with gr.Tabs():\n",
   "                with gr.TabItem(\"Upload File\"):\n",
   "                    file_input = gr.File(label=\"Upload .py or .ipynb file\", file_types=[\".py\", \".ipynb\"])\n",
   "                with gr.TabItem(\"Paste Code\"):\n",
   "                    text_input = gr.Code(label=\"Paste your Python code\", language=\"python\")\n",
   "\n",
   "        with gr.Column(scale=1):\n",
   "            gr.Markdown(\"### 2. Deployment Details\")\n",
   "            hf_token = gr.Textbox(label=\"Hugging Face Access Token (Write)\", type=\"password\", placeholder=\"hf_...\")\n",
   "            space_name = gr.Textbox(label=\"New Space Name\", placeholder=\"my-awesome-agent\")\n",
   "            action_radio = gr.Radio([\"Convert Only\", \"Convert & Deploy to HF Space\"], label=\"Action\", value=\"Convert Only\")\n",
   "            submit_btn = gr.Button(\"Generate Agent\", variant=\"primary\", size=\"lg\")\n",
   "\n",
   "    with gr.Row():\n",
   "        with gr.Group():\n",
   "            gr.Markdown(\"### 3. Results\")\n",
   "            status_output = gr.Markdown(label=\"Status Console\")\n",
   "            zip_output = gr.File(label=\"Download Generated Files\")\n",
   "\n",
   "    submit_btn.click(\n",
   "        fn=process_and_deploy,\n",
   "        inputs=[text_input, file_input, hf_token, space_name, action_radio],\n",
   "        outputs=[zip_output, status_output]\n",
   "    )\n",
   "\n",
   "demo.launch(debug=True, share=True)\n" ] } ],
 "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 5 }