Spaces:
Sleeping
Sleeping
Upload 10 files
Browse files- .dockerignore +12 -0
- .env.example +15 -0
- .gitignore +75 -0
- Dockerfile +33 -0
- LICENSE +21 -0
- README.md +479 -5
- config.py +49 -0
- main.py +713 -0
- models.py +81 -0
- requirements.txt +0 -0
.dockerignore
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.venv
|
| 2 |
+
__pycache__
|
| 3 |
+
*.pyc
|
| 4 |
+
*.pyo
|
| 5 |
+
*.pyd
|
| 6 |
+
build/
|
| 7 |
+
dist/
|
| 8 |
+
*.egg-info
|
| 9 |
+
.git
|
| 10 |
+
.gitignore
|
| 11 |
+
.env
|
| 12 |
+
tests/
|
.env.example
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Example .env file for GEMINI_PROJECT
|
| 2 |
+
# Copy this to a file named .env and fill in real values. DO NOT commit your .env to source control.
|
| 3 |
+
|
| 4 |
+
# Gemini (Google Generative AI) API key
|
| 5 |
+
GEMINI_API_KEY=your_gemini_api_key_here
|
| 6 |
+
|
| 7 |
+
# GitHub personal access token with permissions to create/update repos and configure pages.
|
| 8 |
+
# Minimum scopes: 'repo' (or 'public_repo' for public-only), and 'pages' if available.
|
| 9 |
+
GITHUB_TOKEN=your_github_personal_access_token_here
|
| 10 |
+
|
| 11 |
+
# Your GitHub username (used to construct Pages URL)
|
| 12 |
+
GITHUB_USERNAME=your_github_username_here
|
| 13 |
+
|
| 14 |
+
# A shared secret expected by the evaluation server (keeps endpoints secure)
|
| 15 |
+
STUDENT_SECRET=your_student_secret_here
|
.gitignore
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
*.so
|
| 6 |
+
.Python
|
| 7 |
+
build/
|
| 8 |
+
develop-eggs/
|
| 9 |
+
dist/
|
| 10 |
+
downloads/
|
| 11 |
+
eggs/
|
| 12 |
+
.eggs/
|
| 13 |
+
lib/
|
| 14 |
+
lib64/
|
| 15 |
+
parts/
|
| 16 |
+
sdist/
|
| 17 |
+
var/
|
| 18 |
+
wheels/
|
| 19 |
+
*.egg-info/
|
| 20 |
+
.installed.cfg
|
| 21 |
+
*.egg
|
| 22 |
+
|
| 23 |
+
# Virtual Environment
|
| 24 |
+
.venv/
|
| 25 |
+
venv/
|
| 26 |
+
ENV/
|
| 27 |
+
env/
|
| 28 |
+
|
| 29 |
+
# Testing
|
| 30 |
+
.pytest_cache/
|
| 31 |
+
.coverage
|
| 32 |
+
htmlcov/
|
| 33 |
+
.tox/
|
| 34 |
+
|
| 35 |
+
# IDEs
|
| 36 |
+
.vscode/
|
| 37 |
+
.idea/
|
| 38 |
+
*.swp
|
| 39 |
+
*.swo
|
| 40 |
+
*~
|
| 41 |
+
|
| 42 |
+
# Environment variables
|
| 43 |
+
.env
|
| 44 |
+
|
| 45 |
+
# OS
|
| 46 |
+
.DS_Store
|
| 47 |
+
Thumbs.db
|
| 48 |
+
|
| 49 |
+
# Project specific
|
| 50 |
+
generated_tasks/
|
| 51 |
+
|
| 52 |
+
# Test files and documentation (not needed in production)
|
| 53 |
+
test_*.json
|
| 54 |
+
test_*.py
|
| 55 |
+
test_*.ps1
|
| 56 |
+
*_test.json
|
| 57 |
+
chess_game_*.json
|
| 58 |
+
postman_test.json
|
| 59 |
+
captcha_solver_*.html
|
| 60 |
+
fixed_captcha_solver.html
|
| 61 |
+
run.ps1
|
| 62 |
+
|
| 63 |
+
# Documentation files (optional - keep README.md only)
|
| 64 |
+
ARCHITECTURE_GUIDE.md
|
| 65 |
+
ASSIGNMENT_COMPLIANCE.md
|
| 66 |
+
DEPLOY_CHECKLIST.md
|
| 67 |
+
JSON_REQUEST_GUIDE.md
|
| 68 |
+
README_HF.md
|
| 69 |
+
TESTING.md
|
| 70 |
+
|
| 71 |
+
# Scripts and tests directories
|
| 72 |
+
scripts/
|
| 73 |
+
tests/
|
| 74 |
+
.github/
|
| 75 |
+
*.log
|
Dockerfile
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.12-slim
|
| 2 |
+
|
| 3 |
+
# Prevent Python from writing .pyc files and buffer stdout/stderr
|
| 4 |
+
ENV PYTHONDONTWRITEBYTECODE=1
|
| 5 |
+
ENV PYTHONUNBUFFERED=1
|
| 6 |
+
|
| 7 |
+
WORKDIR /app
|
| 8 |
+
|
| 9 |
+
# Install system deps required by some packages (git, build tools)
|
| 10 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 11 |
+
git \
|
| 12 |
+
build-essential \
|
| 13 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 14 |
+
|
| 15 |
+
# Copy only requirements first to leverage Docker layer caching
|
| 16 |
+
COPY requirements.txt /app/requirements.txt
|
| 17 |
+
|
| 18 |
+
# Install Python dependencies
|
| 19 |
+
RUN python -m pip install --upgrade pip && \
|
| 20 |
+
pip install --no-cache-dir -r /app/requirements.txt
|
| 21 |
+
|
| 22 |
+
# Copy project
|
| 23 |
+
COPY . /app
|
| 24 |
+
|
| 25 |
+
# Create the generated_tasks directory
|
| 26 |
+
RUN mkdir -p /app/generated_tasks
|
| 27 |
+
|
| 28 |
+
# Expose the port Spaces will route to (use PORT env variable default 8080)
|
| 29 |
+
EXPOSE 8080
|
| 30 |
+
ENV PORT=8080
|
| 31 |
+
|
| 32 |
+
# Start Uvicorn; allow PORT override from environment (used by Spaces)
|
| 33 |
+
CMD ["sh", "-c", "uvicorn main:app --host 0.0.0.0 --port ${PORT}"]
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Aman Sachin Kujur
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
CHANGED
|
@@ -1,11 +1,485 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
colorFrom: blue
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
|
|
|
| 7 |
pinned: false
|
| 8 |
-
license: mit
|
| 9 |
---
|
| 10 |
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Gemini Task Automation
|
| 3 |
+
emoji: 🤖
|
| 4 |
colorFrom: blue
|
| 5 |
+
colorTo: purple
|
| 6 |
sdk: docker
|
| 7 |
+
app_port: 8080
|
| 8 |
pinned: false
|
|
|
|
| 9 |
---
|
| 10 |
|
| 11 |
+
# 🤖 Gemini Task Automation System
|
| 12 |
+
|
| 13 |
+
**An AI-powered task automation service that receives task descriptions, generates complete web applications using Gemini AI, and automatically deploys them to GitHub Pages.**
|
| 14 |
+
|
| 15 |
+
## 🎯 What Does This Project Do?
|
| 16 |
+
|
| 17 |
+
This is an **automated code generation and deployment pipeline** that:
|
| 18 |
+
|
| 19 |
+
1. **Receives Task Requests** via REST API (POST /ready endpoint)
|
| 20 |
+
2. **Generates Code** using Google's Gemini AI based on natural language descriptions
|
| 21 |
+
3. **Creates GitHub Repositories** automatically for each task
|
| 22 |
+
4. **Deploys to GitHub Pages** making the generated apps instantly accessible
|
| 23 |
+
5. **Notifies Completion** by sending deployment details to a callback URL
|
| 24 |
+
|
| 25 |
+
### 🔄 Complete Workflow
|
| 26 |
+
|
| 27 |
+
```
|
| 28 |
+
User sends task request → API validates → Gemini generates code →
|
| 29 |
+
Creates GitHub repo → Commits & pushes → Enables GitHub Pages →
|
| 30 |
+
Sends notification with live URL
|
| 31 |
+
```
|
| 32 |
+
|
| 33 |
+
## ✨ Key Features
|
| 34 |
+
|
| 35 |
+
- **Fully Generic** - No hardcoded templates, pure AI-driven generation
|
| 36 |
+
- **Background Processing** - Returns HTTP 200 immediately, processes asynchronously
|
| 37 |
+
- **Round-based Updates** - Round 1 creates new repos, Round 2+ updates existing ones
|
| 38 |
+
- **Attachment Support** - Can include images (logos, mockups, sample data) for AI context
|
| 39 |
+
- **Robust Error Handling** - Detailed logging with specific error types
|
| 40 |
+
- **JSON Schema Enforcement** - Ensures structured, parseable AI responses
|
| 41 |
+
- **Exponential Backoff** - Retries for GitHub API operations
|
| 42 |
+
- **Docker Ready** - Production-ready containerization
|
| 43 |
+
|
| 44 |
+
## 📋 How It Works (Technical Deep Dive)
|
| 45 |
+
|
| 46 |
+
### 1️⃣ Request Reception
|
| 47 |
+
```json
|
| 48 |
+
POST /ready
|
| 49 |
+
{
|
| 50 |
+
"email": "user@example.com",
|
| 51 |
+
"secret": "auth-token",
|
| 52 |
+
"task": "chess-game",
|
| 53 |
+
"round": 1,
|
| 54 |
+
"brief": "Create a chess game with...",
|
| 55 |
+
"checks": ["Has license", "Works in browser"],
|
| 56 |
+
"evaluation_url": "https://callback.example.com",
|
| 57 |
+
"attachments": []
|
| 58 |
+
}
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
### 2️⃣ AI Code Generation
|
| 62 |
+
- Sends task brief + checks + attachments to **Gemini 2.5 Flash**
|
| 63 |
+
- Uses **JSON schema** to enforce structured output
|
| 64 |
+
- AI generates all files (HTML, CSS, JS, README, LICENSE)
|
| 65 |
+
- Returns: `{"files": [{"path": "index.html", "content": "..."}]}`
|
| 66 |
+
|
| 67 |
+
### 3️⃣ GitHub Repository Setup
|
| 68 |
+
- **Round 1:** Creates new repository via GitHub API
|
| 69 |
+
- **Round 2+:** Clones existing repo, updates files
|
| 70 |
+
- Configures git with user credentials
|
| 71 |
+
- Commits with descriptive messages
|
| 72 |
+
|
| 73 |
+
### 4️⃣ Deployment
|
| 74 |
+
- Pushes to GitHub with retry logic (5 attempts, exponential backoff)
|
| 75 |
+
- Enables GitHub Pages via API
|
| 76 |
+
- Waits for Pages to become active
|
| 77 |
+
|
| 78 |
+
### 5️⃣ Notification
|
| 79 |
+
- POSTs deployment results to `evaluation_url`:
|
| 80 |
+
```json
|
| 81 |
+
{
|
| 82 |
+
"email": "user@example.com",
|
| 83 |
+
"task": "chess-game",
|
| 84 |
+
"repo_url": "https://github.com/user/chess-game",
|
| 85 |
+
"pages_url": "https://user.github.io/chess-game",
|
| 86 |
+
"commit_sha": "abc123..."
|
| 87 |
+
}
|
| 88 |
+
```
|
| 89 |
+
|
| 90 |
+
## 🚀 Deployment Options
|
| 91 |
+
|
| 92 |
+
### Option 1: Docker (Recommended)
|
| 93 |
+
```bash
|
| 94 |
+
docker build -t gemini-automation .
|
| 95 |
+
docker run -p 8080:8080 \
|
| 96 |
+
-e GEMINI_API_KEY=your_key \
|
| 97 |
+
-e GITHUB_TOKEN=your_token \
|
| 98 |
+
-e GITHUB_USERNAME=your_username \
|
| 99 |
+
-e STUDENT_SECRET=your_secret \
|
| 100 |
+
gemini-automation
|
| 101 |
+
```
|
| 102 |
+
|
| 103 |
+
### Option 2: Cloud Platform
|
| 104 |
+
Deploy to any platform supporting Docker:
|
| 105 |
+
- **Hugging Face Spaces** (includes GPU option)
|
| 106 |
+
- **Google Cloud Run** (serverless, auto-scaling)
|
| 107 |
+
- **AWS ECS/Fargate** (enterprise-grade)
|
| 108 |
+
- **Azure Container Instances** (pay-per-use)
|
| 109 |
+
- **DigitalOcean App Platform** (simple, affordable)
|
| 110 |
+
|
| 111 |
+
### Option 3: Local Development
|
| 112 |
+
```bash
|
| 113 |
+
# 1. Clone repository
|
| 114 |
+
git clone https://github.com/YOUR_USERNAME/GEMINI_TDS_PROJECT1.git
|
| 115 |
+
cd GEMINI_TDS_PROJECT1
|
| 116 |
+
|
| 117 |
+
# 2. Create virtual environment
|
| 118 |
+
python -m venv .venv
|
| 119 |
+
source .venv/bin/activate # Linux/Mac
|
| 120 |
+
# OR
|
| 121 |
+
.venv\Scripts\Activate.ps1 # Windows
|
| 122 |
+
|
| 123 |
+
# 3. Install dependencies
|
| 124 |
+
pip install -r requirements.txt
|
| 125 |
+
|
| 126 |
+
# 4. Configure environment
|
| 127 |
+
cp .env.example .env
|
| 128 |
+
# Edit .env with your API keys
|
| 129 |
+
|
| 130 |
+
# 5. Run server
|
| 131 |
+
uvicorn main:app --reload --port 8080
|
| 132 |
+
```
|
| 133 |
+
|
| 134 |
+
Access at: `http://localhost:8080`
|
| 135 |
+
|
| 136 |
+
## 🔑 Required API Keys
|
| 137 |
+
|
| 138 |
+
### 1. Google Gemini API Key
|
| 139 |
+
- Go to: https://aistudio.google.com/app/apikey
|
| 140 |
+
- Click "Create API Key"
|
| 141 |
+
- Copy the key (starts with `AIza...`)
|
| 142 |
+
- **Free tier:** 15 requests/minute, 1500 requests/day
|
| 143 |
+
|
| 144 |
+
### 2. GitHub Personal Access Token
|
| 145 |
+
- Go to: GitHub Settings → Developer settings → Personal access tokens → Tokens (classic)
|
| 146 |
+
- Click "Generate new token (classic)"
|
| 147 |
+
- Select scopes: `repo` (full control of private repositories)
|
| 148 |
+
- Generate and copy token (starts with `ghp_...`)
|
| 149 |
+
- **Never commit this token!**
|
| 150 |
+
|
| 151 |
+
### 3. Student Secret (Custom Auth)
|
| 152 |
+
- Create your own secret string (e.g., `my-secret-key-12345`)
|
| 153 |
+
- Used to authenticate incoming requests
|
| 154 |
+
- Can be any string you choose
|
| 155 |
+
|
| 156 |
+
## ⚙️ Environment Variables
|
| 157 |
+
|
| 158 |
+
Create a `.env` file in the project root:
|
| 159 |
+
|
| 160 |
+
```env
|
| 161 |
+
GEMINI_API_KEY=AIzaSy...your_key_here
|
| 162 |
+
GITHUB_TOKEN=ghp_...your_token_here
|
| 163 |
+
GITHUB_USERNAME=your_github_username
|
| 164 |
+
STUDENT_SECRET=your_custom_secret_string
|
| 165 |
+
```
|
| 166 |
+
|
| 167 |
+
| Variable | Required | Description |
|
| 168 |
+
|----------|----------|-------------|
|
| 169 |
+
| `GEMINI_API_KEY` | ✅ Yes | Google Generative AI API key for code generation |
|
| 170 |
+
| `GITHUB_TOKEN` | ✅ Yes | GitHub PAT with `repo` scope for repo operations |
|
| 171 |
+
| `GITHUB_USERNAME` | ✅ Yes | Your GitHub username for repository creation |
|
| 172 |
+
| `STUDENT_SECRET` | ✅ Yes | Shared secret for authenticating incoming requests |
|
| 173 |
+
|
| 174 |
+
## 📊 Project Architecture
|
| 175 |
+
|
| 176 |
+
```
|
| 177 |
+
┌─────────────┐ ┌──────────────┐ ┌─────────────┐
|
| 178 |
+
│ Client │─────▶│ FastAPI │─────▶│ Gemini AI │
|
| 179 |
+
│ (Postman) │◀─────│ /ready │◀─────│ (Code Gen) │
|
| 180 |
+
└─────────────┘ └──────────────┘ └─────────────┘
|
| 181 |
+
│
|
| 182 |
+
▼
|
| 183 |
+
┌──────────────┐
|
| 184 |
+
│ GitPython │
|
| 185 |
+
│ (Local Ops) │
|
| 186 |
+
└──────────────┘
|
| 187 |
+
│
|
| 188 |
+
▼
|
| 189 |
+
┌──────────────┐ ┌─────────────┐
|
| 190 |
+
│ GitHub API │─────▶│GitHub Pages │
|
| 191 |
+
│ (Create Repo)│ │ (Deploy) │
|
| 192 |
+
└──────────────┘ └─────────────┘
|
| 193 |
+
│
|
| 194 |
+
▼
|
| 195 |
+
┌──────────────┐
|
| 196 |
+
│ Callback URL │
|
| 197 |
+
│ (Notify Done)│
|
| 198 |
+
└──────────────┘
|
| 199 |
+
```
|
| 200 |
+
|
| 201 |
+
## 🛠️ Technology Stack
|
| 202 |
+
|
| 203 |
+
| Component | Technology | Purpose |
|
| 204 |
+
|-----------|-----------|---------|
|
| 205 |
+
| **API Framework** | FastAPI | High-performance REST API |
|
| 206 |
+
| **AI Model** | Gemini 2.5 Flash | Code generation from natural language |
|
| 207 |
+
| **Validation** | Pydantic | Request/config validation |
|
| 208 |
+
| **Git Operations** | GitPython | Local repo management |
|
| 209 |
+
| **GitHub Integration** | GitHub REST API | Repo creation, Pages deployment |
|
| 210 |
+
| **Async Tasks** | asyncio | Background task processing |
|
| 211 |
+
| **HTTP Client** | httpx | Async HTTP requests |
|
| 212 |
+
| **Container** | Docker | Production deployment |
|
| 213 |
+
|
| 214 |
+
## 📁 Project Structure
|
| 215 |
+
|
| 216 |
+
```
|
| 217 |
+
GEMINI_TDS_PROJECT1/
|
| 218 |
+
├── main.py # FastAPI app + orchestration logic
|
| 219 |
+
├── config.py # Environment config with validation
|
| 220 |
+
├── models.py # Pydantic request/response models
|
| 221 |
+
├── requirements.txt # Python dependencies
|
| 222 |
+
├── Dockerfile # Production container definition
|
| 223 |
+
├── .dockerignore # Docker build exclusions
|
| 224 |
+
├── .gitignore # Git exclusions
|
| 225 |
+
├── .env.example # Template for environment variables
|
| 226 |
+
├── LICENSE # MIT license
|
| 227 |
+
└── README.md # This file
|
| 228 |
+
```
|
| 229 |
+
|
| 230 |
+
## 📖 API Documentation
|
| 231 |
+
|
| 232 |
+
### POST /ready
|
| 233 |
+
|
| 234 |
+
**Description:** Submit a task for AI-powered code generation and deployment
|
| 235 |
+
|
| 236 |
+
**Request Body:**
|
| 237 |
+
```json
|
| 238 |
+
{
|
| 239 |
+
"email": "user@example.com",
|
| 240 |
+
"secret": "your_student_secret",
|
| 241 |
+
"task": "unique-task-id",
|
| 242 |
+
"round": 1,
|
| 243 |
+
"nonce": "unique-request-id",
|
| 244 |
+
"brief": "Detailed description of what to build...",
|
| 245 |
+
"checks": ["Requirement 1", "Requirement 2"],
|
| 246 |
+
"evaluation_url": "https://webhook.site/your-id",
|
| 247 |
+
"attachments": [
|
| 248 |
+
{
|
| 249 |
+
"name": "logo.png",
|
| 250 |
+
"url": "data:image/png;base64,iVBORw0KGgo..."
|
| 251 |
+
}
|
| 252 |
+
]
|
| 253 |
+
}
|
| 254 |
+
```
|
| 255 |
+
|
| 256 |
+
**Response:**
|
| 257 |
+
```json
|
| 258 |
+
{
|
| 259 |
+
"message": "Task received successfully!",
|
| 260 |
+
"task_id": "unique-task-id"
|
| 261 |
+
}
|
| 262 |
+
```
|
| 263 |
+
|
| 264 |
+
**Status Codes:**
|
| 265 |
+
- `200 OK` - Task accepted, processing in background
|
| 266 |
+
- `403 Forbidden` - Invalid secret
|
| 267 |
+
- `422 Unprocessable Entity` - Invalid request format
|
| 268 |
+
|
| 269 |
+
### Callback Notification
|
| 270 |
+
|
| 271 |
+
When deployment completes, the API POSTs to your `evaluation_url`:
|
| 272 |
+
|
| 273 |
+
```json
|
| 274 |
+
{
|
| 275 |
+
"email": "user@example.com",
|
| 276 |
+
"task": "unique-task-id",
|
| 277 |
+
"round": 1,
|
| 278 |
+
"nonce": "unique-request-id",
|
| 279 |
+
"repo_url": "https://github.com/username/unique-task-id",
|
| 280 |
+
"commit_sha": "abc123def456...",
|
| 281 |
+
"pages_url": "https://username.github.io/unique-task-id"
|
| 282 |
+
}
|
| 283 |
+
```
|
| 284 |
+
|
| 285 |
+
## 🧪 Testing
|
| 286 |
+
|
| 287 |
+
### Test with Postman / cURL
|
| 288 |
+
|
| 289 |
+
**1. Get a webhook URL:**
|
| 290 |
+
- Go to https://webhook.site
|
| 291 |
+
- Copy your unique URL
|
| 292 |
+
|
| 293 |
+
**2. Send test request:**
|
| 294 |
+
|
| 295 |
+
```bash
|
| 296 |
+
curl -X POST http://localhost:8080/ready \
|
| 297 |
+
-H "Content-Type: application/json" \
|
| 298 |
+
-d '{
|
| 299 |
+
"email": "test@example.com",
|
| 300 |
+
"secret": "your_student_secret",
|
| 301 |
+
"task": "hello-world-test",
|
| 302 |
+
"round": 1,
|
| 303 |
+
"nonce": "test-001",
|
| 304 |
+
"brief": "Create a simple hello world webpage with a gradient background and centered text saying Hello World!",
|
| 305 |
+
"checks": ["Has index.html", "Has MIT license", "Text displays"],
|
| 306 |
+
"evaluation_url": "YOUR_WEBHOOK_URL_HERE",
|
| 307 |
+
"attachments": []
|
| 308 |
+
}'
|
| 309 |
+
```
|
| 310 |
+
|
| 311 |
+
**3. Check results:**
|
| 312 |
+
- API returns immediately: `{"message": "Task received successfully!"}`
|
| 313 |
+
- Watch webhook.site for completion notification (~30-60 seconds)
|
| 314 |
+
- Visit the `pages_url` in notification to see live site
|
| 315 |
+
|
| 316 |
+
### Example Tasks
|
| 317 |
+
|
| 318 |
+
<details>
|
| 319 |
+
<summary><b>Calculator App</b></summary>
|
| 320 |
+
|
| 321 |
+
```json
|
| 322 |
+
{
|
| 323 |
+
"email": "test@example.com",
|
| 324 |
+
"secret": "your_secret",
|
| 325 |
+
"task": "calculator-app",
|
| 326 |
+
"round": 1,
|
| 327 |
+
"nonce": "calc-001",
|
| 328 |
+
"brief": "Create a calculator with: 1) Basic operations (+, -, ×, ÷), 2) Clear button, 3) Decimal support, 4) Keyboard input, 5) Responsive design with Tailwind CSS",
|
| 329 |
+
"checks": [
|
| 330 |
+
"Has MIT license",
|
| 331 |
+
"README explains usage",
|
| 332 |
+
"Calculator performs addition",
|
| 333 |
+
"Calculator performs subtraction",
|
| 334 |
+
"Has clear button",
|
| 335 |
+
"Responsive design"
|
| 336 |
+
],
|
| 337 |
+
"evaluation_url": "https://webhook.site/your-id",
|
| 338 |
+
"attachments": []
|
| 339 |
+
}
|
| 340 |
+
```
|
| 341 |
+
</details>
|
| 342 |
+
|
| 343 |
+
<details>
|
| 344 |
+
<summary><b>Todo List</b></summary>
|
| 345 |
+
|
| 346 |
+
```json
|
| 347 |
+
{
|
| 348 |
+
"email": "test@example.com",
|
| 349 |
+
"secret": "your_secret",
|
| 350 |
+
"task": "todo-list-app",
|
| 351 |
+
"round": 1,
|
| 352 |
+
"nonce": "todo-001",
|
| 353 |
+
"brief": "Create a todo list with: 1) Add new tasks, 2) Mark tasks as complete, 3) Delete tasks, 4) LocalStorage persistence, 5) Filter by All/Active/Completed, 6) Task counter, 7) Beautiful UI with animations",
|
| 354 |
+
"checks": [
|
| 355 |
+
"Can add tasks",
|
| 356 |
+
"Can mark complete",
|
| 357 |
+
"Can delete tasks",
|
| 358 |
+
"Tasks persist on refresh",
|
| 359 |
+
"Has filter buttons",
|
| 360 |
+
"Shows task count"
|
| 361 |
+
],
|
| 362 |
+
"evaluation_url": "https://webhook.site/your-id",
|
| 363 |
+
"attachments": []
|
| 364 |
+
}
|
| 365 |
+
```
|
| 366 |
+
</details>
|
| 367 |
+
|
| 368 |
+
<details>
|
| 369 |
+
<summary><b>Chess Game (With Attachments)</b></summary>
|
| 370 |
+
|
| 371 |
+
```json
|
| 372 |
+
{
|
| 373 |
+
"email": "test@example.com",
|
| 374 |
+
"secret": "your_secret",
|
| 375 |
+
"task": "chess-game-pro",
|
| 376 |
+
"round": 1,
|
| 377 |
+
"nonce": "chess-001",
|
| 378 |
+
"brief": "Create a chess game with: 1) Full chess rules, 2) Drag-and-drop pieces, 3) Move validation, 4) Check/Checkmate detection, 5) Timed modes (Blitz 5min, Rapid 10min), 6) Move history, 7) Captured pieces display",
|
| 379 |
+
"checks": [
|
| 380 |
+
"All pieces move correctly",
|
| 381 |
+
"Check detection works",
|
| 382 |
+
"Checkmate ends game",
|
| 383 |
+
"Timer counts down",
|
| 384 |
+
"Move history displays"
|
| 385 |
+
],
|
| 386 |
+
"evaluation_url": "https://webhook.site/your-id",
|
| 387 |
+
"attachments": []
|
| 388 |
+
}
|
| 389 |
+
```
|
| 390 |
+
</details>
|
| 391 |
+
|
| 392 |
+
## 🐛 Troubleshooting
|
| 393 |
+
|
| 394 |
+
### Common Issues
|
| 395 |
+
|
| 396 |
+
**Problem:** `403 Forbidden` response
|
| 397 |
+
- **Solution:** Check that `secret` in request matches `STUDENT_SECRET` env var
|
| 398 |
+
|
| 399 |
+
**Problem:** Task accepted but no notification received
|
| 400 |
+
- **Solution:** Check Hugging Face Space logs or local console for errors. Common causes:
|
| 401 |
+
- Invalid GitHub token or insufficient permissions
|
| 402 |
+
- Gemini API quota exceeded
|
| 403 |
+
- Invalid evaluation_url
|
| 404 |
+
|
| 405 |
+
**Problem:** GitHub API errors (403, 404)
|
| 406 |
+
- **Solution:** Verify GitHub token has `repo` scope:
|
| 407 |
+
```bash
|
| 408 |
+
curl -H "Authorization: token YOUR_TOKEN" https://api.github.com/user
|
| 409 |
+
```
|
| 410 |
+
|
| 411 |
+
**Problem:** Gemini AI returns invalid JSON
|
| 412 |
+
- **Solution:** Check logs for response. The system now has improved error handling with specific error messages.
|
| 413 |
+
|
| 414 |
+
**Problem:** Pages deployment times out
|
| 415 |
+
- **Solution:** GitHub Pages can take 1-2 minutes to activate. The system retries 5 times with exponential backoff.
|
| 416 |
+
|
| 417 |
+
### Debug Mode
|
| 418 |
+
|
| 419 |
+
Enable detailed logging:
|
| 420 |
+
```python
|
| 421 |
+
# In main.py, add at top:
|
| 422 |
+
import logging
|
| 423 |
+
logging.basicConfig(level=logging.DEBUG)
|
| 424 |
+
```
|
| 425 |
+
|
| 426 |
+
Or set environment variable:
|
| 427 |
+
```bash
|
| 428 |
+
export LOG_LEVEL=DEBUG # Linux/Mac
|
| 429 |
+
$env:LOG_LEVEL="DEBUG" # Windows PowerShell
|
| 430 |
+
```
|
| 431 |
+
|
| 432 |
+
### Viewing Logs
|
| 433 |
+
|
| 434 |
+
**Docker:**
|
| 435 |
+
```bash
|
| 436 |
+
docker logs -f CONTAINER_ID
|
| 437 |
+
```
|
| 438 |
+
|
| 439 |
+
**Hugging Face Space:**
|
| 440 |
+
Go to Space → "Logs" tab
|
| 441 |
+
|
| 442 |
+
## 🔒 Security Best Practices
|
| 443 |
+
|
| 444 |
+
1. **Never commit `.env` file** - Already in `.gitignore`
|
| 445 |
+
2. **Rotate API keys regularly** - Every 90 days recommended
|
| 446 |
+
3. **Use environment-specific secrets** - Different keys for dev/prod
|
| 447 |
+
4. **Limit GitHub token scope** - Only `repo` or `public_repo` needed
|
| 448 |
+
5. **Validate incoming requests** - `secret` field prevents unauthorized access
|
| 449 |
+
6. **Monitor API usage** - Check Gemini and GitHub API quotas
|
| 450 |
+
|
| 451 |
+
## 📈 Performance & Limits
|
| 452 |
+
|
| 453 |
+
| Metric | Value | Notes |
|
| 454 |
+
|--------|-------|-------|
|
| 455 |
+
| Average task duration | 30-60s | Depends on complexity |
|
| 456 |
+
| Gemini API rate limit | 15/min | Free tier |
|
| 457 |
+
| GitHub API rate limit | 5000/hour | Authenticated |
|
| 458 |
+
| Max attachment size | ~10MB | Base64 encoding adds 33% |
|
| 459 |
+
| Concurrent tasks | Unlimited | Background processing |
|
| 460 |
+
|
| 461 |
+
## 🤝 Contributing
|
| 462 |
+
|
| 463 |
+
Contributions welcome! Areas for improvement:
|
| 464 |
+
- [ ] Add support for GitLab/Bitbucket deployment
|
| 465 |
+
- [ ] Implement task queue with Redis
|
| 466 |
+
- [ ] Add progress tracking API
|
| 467 |
+
- [ ] Support multiple AI models (Claude, GPT-4)
|
| 468 |
+
- [ ] Add unit tests
|
| 469 |
+
- [ ] Implement rate limiting
|
| 470 |
+
- [ ] Add metrics/monitoring
|
| 471 |
+
|
| 472 |
+
## 📄 License
|
| 473 |
+
|
| 474 |
+
MIT License - see [LICENSE](LICENSE) file for details
|
| 475 |
+
|
| 476 |
+
## 🙏 Acknowledgments
|
| 477 |
+
|
| 478 |
+
- **Google Gemini AI** - Code generation capabilities
|
| 479 |
+
- **FastAPI** - Modern Python web framework
|
| 480 |
+
- **GitHub** - Repository hosting and Pages deployment
|
| 481 |
+
- **Hugging Face** - Spaces platform for easy deployment
|
| 482 |
+
|
| 483 |
+
---
|
| 484 |
+
|
| 485 |
+
**Built for TDS Project 1** - Automated task generation and deployment system
|
config.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# config.py
|
| 2 |
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
| 3 |
+
from functools import lru_cache
|
| 4 |
+
from dotenv import load_dotenv
|
| 5 |
+
from typing import Optional
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
|
| 8 |
+
# Load .env early so environment variables are available to Pydantic Settings.
|
| 9 |
+
project_root = Path(__file__).resolve().parent
|
| 10 |
+
load_dotenv(dotenv_path=project_root / '.env')
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Define the structure for all required secrets/config
|
| 14 |
+
class Settings(BaseSettings):
    """Typed application configuration sourced from the environment / .env file.

    Every secret is optional at construction time so the object can always be
    built; call :meth:`validate_required` afterwards to fail fast with a clear,
    actionable error when any required value is missing or blank.
    """

    # API keys/tokens — optional at construction time; validated explicitly.
    GEMINI_API_KEY: Optional[str] = None
    GITHUB_TOKEN: Optional[str] = None

    # Project-specific variables
    STUDENT_SECRET: Optional[str] = None
    GITHUB_USERNAME: Optional[str] = None

    # Define which file to load settings from (keeps behavior explicit)
    model_config = SettingsConfigDict(env_file=".env", extra="ignore")

    def validate_required(self) -> None:
        """Raise ``RuntimeError`` naming every required variable that is unset.

        A variable counts as missing when it is ``None`` or a string that is
        empty/whitespace-only.
        """
        required = ("GEMINI_API_KEY", "GITHUB_TOKEN", "STUDENT_SECRET", "GITHUB_USERNAME")
        missing = [
            name
            for name in required
            if (value := getattr(self, name, None)) is None
            or (isinstance(value, str) and value.strip() == "")
        ]

        if missing:
            raise RuntimeError(
                "Missing required environment variables: " + ", ".join(missing) +
                ".\nPlease create a .env file (see .env.example) or set these in your environment."
            )
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# Use lru_cache to load the settings only once, improving performance
|
| 44 |
+
@lru_cache()
def get_settings():
    """Return the process-wide, lazily built ``Settings`` instance.

    The result is cached by ``lru_cache`` so the environment is read and
    validated only once per process. Raises ``RuntimeError`` (via
    ``Settings.validate_required``) if any required variable is missing.
    """
    cfg = Settings()
    cfg.validate_required()
    return cfg
|
main.py
ADDED
|
@@ -0,0 +1,713 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI, HTTPException
|
| 2 |
+
from starlette.responses import JSONResponse
|
| 3 |
+
from models import TaskRequest # Ensure models.py is available
|
| 4 |
+
from config import get_settings
|
| 5 |
+
import asyncio
|
| 6 |
+
import httpx # Used for making the HTTP notification call
|
| 7 |
+
import json # For parsing the structured JSON response from the LLM
|
| 8 |
+
import os # For configuration and file system operations
|
| 9 |
+
import base64
|
| 10 |
+
import re
|
| 11 |
+
import git # For local Git operations
|
| 12 |
+
import time
|
| 13 |
+
import shutil
|
| 14 |
+
import stat # For robust cleanup on Windows
|
| 15 |
+
|
| 16 |
+
# Assuming this model is defined elsewhere
|
| 17 |
+
# --- Configuration and Setup ---
|
| 18 |
+
settings = get_settings()
|
| 19 |
+
|
| 20 |
+
# --- Helper Function for Security ---
def verify_secret(secret_from_request: str) -> bool:
    """Return True iff *secret_from_request* matches the configured secret.

    Fixes two defects of the naive ``==`` comparison:
    - uses a constant-time comparison (``hmac.compare_digest``) so response
      timing cannot leak how much of the secret matched;
    - rejects the case where both sides are None/empty, which previously
      compared equal and would have authenticated an unconfigured server.
    """
    import hmac  # local import: keeps the module's import block untouched

    expected = settings.STUDENT_SECRET
    if not expected or not secret_from_request:
        return False
    return hmac.compare_digest(str(secret_from_request), str(expected))
|
| 24 |
+
|
| 25 |
+
# --- GITHUB CONSTANTS ---
GITHUB_API_BASE = "https://api.github.com"
# Pages URL is constructed dynamically using the username from settings,
# e.g. https://<username>.github.io/<repo>/.
GITHUB_PAGES_BASE = f"https://{settings.GITHUB_USERNAME}.github.io"
# --------------------------

# LLM Configuration
# Endpoint of the Gemini flash preview model used for code generation.
GEMINI_API_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-preview-05-20:generateContent"
# NOTE: API key is left empty (or read from environment) as per instructions;
# the execution environment is assumed to handle the required authentication.
GEMINI_API_KEY = settings.GEMINI_API_KEY

# Initialize the FastAPI application
app = FastAPI(
    title="Automated Task Receiver & Processor",
    description="Endpoint for receiving task assignments and triggering AI code generation/deployment."
)

# Global storage for the last received task (for demonstration purposes).
# NOTE(review): module-level mutable state — not shared across worker
# processes and not thread-safe; confirm it is only used for demo/debug output.
received_task_data = {}
|
| 44 |
+
|
| 45 |
+
# --- REFACTORING: SPLIT deploy_to_github ---
|
| 46 |
+
|
| 47 |
+
async def setup_local_repo(local_path: str, repo_name: str, repo_url_auth: str, repo_url_http: str, round_index: int) -> git.Repo:
    """Create the remote repo (round 1) or clone the existing one (round 2+)
    into an EMPTY directory, and return the local ``git.Repo`` handle.

    Args:
        local_path: Empty directory prepared by the caller (cleaned beforehand).
        repo_name: Bare repository name on GitHub.
        repo_url_auth: HTTPS remote URL embedding username:token (for git ops).
        repo_url_http: Plain HTTPS URL, used for logging only.
        round_index: 1 creates a fresh repo; >= 2 clones the existing one.

    Raises:
        ValueError: for round_index < 1 (previously this fell through and
            crashed with an UnboundLocalError at ``return repo``).
        Exception: when the GitHub API call or git operation fails; the
            original error is chained as ``__cause__`` for debugging.
    """
    github_token = settings.GITHUB_TOKEN

    headers = {
        "Authorization": f"token {github_token}",
        "Accept": "application/vnd.github.v3+json",
        "X-GitHub-Api-Version": "2022-11-28"
    }

    # Guard: the original code left `repo` unbound for round_index < 1.
    if round_index < 1:
        raise ValueError(f"Invalid round_index: {round_index}")

    async with httpx.AsyncClient(timeout=45) as client:
        try:
            # 1. CREATE or INITIALIZE REPO / CLONE EXISTING REPO
            if round_index == 1:
                print(f" -> R1: Creating remote repository '{repo_name}'...")
                payload = {"name": repo_name, "private": False, "auto_init": True}
                response = await client.post(f"{GITHUB_API_BASE}/user/repos", json=payload, headers=headers)
                response.raise_for_status()

                # Initialize local git repo in the EMPTY path
                repo = git.Repo.init(local_path)
                repo.create_remote('origin', repo_url_auth)
                print(" -> R1: Local git repository initialized.")
            else:
                # Round 2+: clone the existing work into the EMPTY local_path.
                # local_path is guaranteed empty by the caller's cleanup step.
                print(f" -> R{round_index}: Cloning existing repository from {repo_url_http}...")
                repo = git.Repo.clone_from(repo_url_auth, local_path)
                print(f" -> R{round_index}: Repository cloned and ready for update.")

            return repo

        except httpx.HTTPStatusError as e:
            print(f"--- [API ERROR] GitHub API call failed with status {e.response.status_code}: {e.response.text} ---")
            raise Exception("GitHub API call failed during repository setup.") from e
        except git.GitCommandError as e:
            print(f"--- [GIT ERROR] Failed to perform git operation: {e} ---")
            raise Exception("Git operation failed during repository setup.") from e
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
async def commit_and_publish(repo: git.Repo, task_id: str, round_index: int, repo_name: str) -> dict:
    """Add, commit, and push all files in *repo*, then configure GitHub Pages.

    Args:
        repo: Local repository handle (already wired to the 'origin' remote).
        task_id: Task identifier, embedded in the commit message.
        round_index: Evaluation round, embedded in the commit message.
        repo_name: GitHub repository name (used for API and Pages URLs).

    Returns:
        dict with keys ``repo_url``, ``commit_sha``, ``pages_url``.

    Raises:
        Exception: on git failure, GitHub API failure, or when Pages cannot
        be configured after all retries.
    """

    github_username = settings.GITHUB_USERNAME
    github_token = settings.GITHUB_TOKEN

    headers = {
        "Authorization": f"token {github_token}",
        "Accept": "application/vnd.github.v3+json",
        "X-GitHub-Api-Version": "2022-11-28"
    }
    repo_url_http = f"https://github.com/{github_username}/{repo_name}"

    async with httpx.AsyncClient(timeout=45) as client:
        try:
            # 1. CONFIGURE GIT USER (required for commits in Docker, where no
            # global git identity exists)
            repo.config_writer().set_value("user", "name", "TDS AutoDeploy Bot").release()
            repo.config_writer().set_value("user", "email", "bot@tds-project.local").release()

            # 2. ADD, COMMIT, AND PUSH FILES
            # The new files (generated and attachments) are now in the local_path.
            repo.git.add(A=True)
            commit_message = f"Task {task_id} - Round {round_index}: LLM-generated app update/creation"
            repo.index.commit(commit_message)
            commit_sha = repo.head.object.hexsha
            print(f" -> Files committed. SHA: {commit_sha}")

            # Ensure main branch consistency and push. Force-push is deliberate:
            # each round fully replaces the remote state with the local tree.
            repo.git.branch('-M', 'main')
            print(" -> Branch renamed to 'main'.")
            repo.git.push('--set-upstream', 'origin', 'main', force=True)
            print(" -> Changes pushed to remote 'main' branch.")

            # Wait for GitHub to register the branch before touching the Pages
            # API — it returns 422 if it does not see 'main' yet.
            print(" -> Waiting 10 seconds for GitHub to register the main branch...")
            await asyncio.sleep(10)

            # 3. ENABLE GITHUB PAGES WITH ROBUST RETRIES
            print(" -> Enabling GitHub Pages with robust retries...")
            pages_api_url = f"{GITHUB_API_BASE}/repos/{github_username}/{repo_name}/pages"
            pages_payload = {"source": {"branch": "main", "path": "/"}}
            pages_max_retries = 5
            pages_base_delay = 3

            for retry_attempt in range(pages_max_retries):
                try:
                    # GET tells us whether Pages already exists (200) so we can
                    # choose between PUT (update) and POST (create).
                    pages_response = await client.get(pages_api_url, headers=headers)
                    is_configured = (pages_response.status_code == 200)

                    if is_configured:
                        print(f" -> Pages exists. Updating configuration (Attempt {retry_attempt + 1}).")
                        (await client.put(pages_api_url, json=pages_payload, headers=headers)).raise_for_status()
                    else:
                        print(f" -> Creating Pages configuration (Attempt {retry_attempt + 1}).")
                        (await client.post(pages_api_url, json=pages_payload, headers=headers)).raise_for_status()

                    print(" -> Pages configuration successful.")
                    break

                except httpx.HTTPStatusError as e:
                    # 422 "main branch must exist" is a known propagation delay:
                    # back off exponentially and retry; everything else re-raises.
                    if e.response.status_code == 422 and "main branch must exist" in e.response.text and retry_attempt < pages_max_retries - 1:
                        delay = pages_base_delay * (2 ** retry_attempt)
                        print(f" -> [Timing Issue] Branch not recognized. Retrying in {delay} seconds...")
                        await asyncio.sleep(delay)
                    else:
                        raise
            else:
                # for/else: reached only if no attempt ever hit `break`.
                raise Exception("Failed to configure GitHub Pages after multiple retries due to branch existence.")

            # 4. CONSTRUCT RETURN VALUES
            # Short grace period so the Pages deployment has started before the
            # evaluation server is notified of the URL.
            print(" -> Waiting 5 seconds for GitHub Pages deployment...")
            await asyncio.sleep(5)

            pages_url = f"{GITHUB_PAGES_BASE}/{repo_name}/"

            return {
                "repo_url": repo_url_http,
                "commit_sha": commit_sha,
                "pages_url": pages_url
            }

        except git.GitCommandError as e:
            print(f"--- [GIT ERROR] Failed to perform git operation: {e} ---")
            raise Exception("Git operation failed during deployment.")
        except httpx.HTTPStatusError as e:
            print(f"--- [API ERROR] GitHub API call failed with status {e.response.status_code}: {e.response.text} ---")
            raise Exception("GitHub API call failed during deployment.")
        except Exception as e:
            print(f"--- [CRITICAL ERROR] Deployment failed: {e} ---")
            raise
|
| 180 |
+
|
| 181 |
+
# --- REMOVED: Original deploy_to_github (replaced by setup_local_repo and commit_and_publish) ---
|
| 182 |
+
# The function name deploy_to_github is now DELETED.
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def data_uri_to_gemini_part(data_uri: str) -> dict:
    """Convert a Base64 image Data URI into the ``inlineData`` part structure
    expected by the Gemini multimodal API.

    Returns ``None`` (with a diagnostic print) when the URI is empty,
    malformed, or refers to a non-image MIME type.
    """
    if not data_uri or not data_uri.startswith("data:"):
        print("ERROR: Invalid Data URI provided.")
        return None

    try:
        # Split the URI into its MIME type and Base64 payload.
        parsed = re.search(r"data:(?P<mime_type>[^;]+);base64,(?P<base64_data>.*)", data_uri, re.IGNORECASE)
        if parsed is None:
            print("ERROR: Could not parse MIME type or base64 data from URI.")
            return None

        mime_type = parsed.group('mime_type')
        payload = parsed.group('base64_data')

        # Only image attachments are forwarded to the LLM.
        if not mime_type.startswith("image/"):
            print(f"Skipping attachment with non-image MIME type: {mime_type}")
            return None

        return {
            "inlineData": {
                "data": payload,  # the Base64 string itself
                "mimeType": mime_type
            }
        }
    except Exception as exc:
        print(f"ERROR creating Gemini Part from URI: {exc}")
        return None
|
| 218 |
+
|
| 219 |
+
def is_image_data_uri(data_uri: str) -> bool:
    """Return True when *data_uri* is a Base64 Data URI with an image/* MIME type."""
    # Cheap prefix check first; the regex then confirms the image MIME type
    # and the base64 marker in one pass.
    return data_uri.startswith("data:") and bool(
        re.search(r"data:image/[^;]+;base64,", data_uri, re.IGNORECASE)
    )
|
| 225 |
+
# --- Helper Functions for File System Operations ---
|
| 226 |
+
|
| 227 |
+
def _coerce_text(filename: str, content) -> str:
    """Best-effort conversion of LLM-provided file content to a string.

    The LLM occasionally returns a list of lines or a non-string value;
    both are normalized here so every file can be written as text.
    """
    if isinstance(content, list):
        print(f" -> WARNING: Content for {filename} is a list, joining with newlines")
        return "\n".join(str(item) for item in content)
    if not isinstance(content, str):
        print(f" -> WARNING: Content for {filename} is {type(content)}, converting to string")
        return str(content)
    return content


async def save_generated_files_locally(task_id: str, files: dict) -> str:
    """Save LLM-generated files into ``/tmp/generated_tasks/<task_id>``.

    Accepts both response formats:
    - new: ``{"files": [{"path": ..., "content": ...}, ...]}``
    - legacy flat: ``{filename: content, ...}``

    Fixes vs. the original:
    - log/error messages now include the actual filename (they previously
      printed the literal placeholder "(unknown)");
    - the legacy flat format also creates parent subdirectories, matching
      the new-format branch;
    - the raised Exception chains the underlying error via ``from e``.

    Returns:
        The task directory path the files were written into.

    Raises:
        Exception: if any file cannot be written.
    """
    base_dir = "/tmp/generated_tasks"
    task_dir = os.path.join(base_dir, task_id)
    os.makedirs(task_dir, exist_ok=True)

    print(f"--- [LOCAL_SAVE] Saving files to: {task_dir} ---")

    # Normalize both input shapes into a single (filename, content) sequence.
    if "files" in files and isinstance(files["files"], list):
        entries = [(obj.get("path", ""), obj.get("content", "")) for obj in files["files"]]
    else:
        entries = list(files.items())

    for filename, content in entries:
        if not filename:
            print(" -> WARNING: Skipping file with no path")
            continue

        content = _coerce_text(filename, content)
        file_path = os.path.join(task_dir, filename)
        try:
            # Create subdirectories if needed (e.g., "css/style.css")
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(content)
            print(f" -> Saved: {filename} (Size: {len(content)} bytes)")
        except Exception as e:
            print(f" -> ERROR saving {filename}: {e}")
            print(f" -> Content type: {type(content)}, First 200 chars: {str(content)[:200]}")
            raise Exception(f"Failed to save file {filename} locally.") from e

    return task_dir
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
# --- Helper Functions for External Services ---
|
| 300 |
+
|
| 301 |
+
async def call_llm_for_code(prompt: str, task_id: str, image_parts: list) -> dict:
    """Call the Gemini API to generate the web application code and structured
    metadata (index.html, README.md, LICENSE), supporting image inputs.

    Args:
        prompt: Full task brief passed to the model as the user text part.
        task_id: Task identifier, used only for logging.
        image_parts: Pre-built ``inlineData`` parts (see
            ``data_uri_to_gemini_part``); may be empty.

    Returns:
        The parsed JSON object emitted by the model — expected shape is
        ``{"files": [{"path": ..., "content": ...}, ...]}`` per the schema
        below, though this function does not re-validate it.

    Raises:
        Exception: after all retries fail ("LLM Code Generation Failure").
    """
    print(f"--- [LLM_CALL] Attempting to generate code for Task: {task_id} using Gemini API ---")

    # Define system instruction for the model
    system_prompt = (
        "You are an expert full-stack engineer and technical writer. Your task is to generate "
        "a complete web application with three files in a structured JSON response:\n\n"
        "Return a JSON object with a 'files' array containing:\n"
        "1. index.html - A single, complete, fully responsive HTML file using Tailwind CSS CDN for styling, "
        "with all JavaScript inline. Must be production-ready and implement ALL requested features.\n"
        "2. README.md - Professional project documentation with title, description, features, usage instructions.\n"
        "3. LICENSE - Full text of the MIT License.\n\n"
        "Example response structure:\n"
        "{\n"
        ' "files": [\n'
        ' {"path": "index.html", "content": "<!DOCTYPE html>..."},\n'
        ' {"path": "README.md", "content": "# Project Title\\n\\n..."},\n'
        ' {"path": "LICENSE", "content": "MIT License\\n\\nCopyright..."}\n'
        " ]\n"
        "}\n\n"
        "Make the application beautiful, functional, and complete. Use modern design principles."
    )

    # JSON response schema enforced server-side via generationConfig, so the
    # model's output is guaranteed to be parseable JSON of this shape.
    response_schema = {
        "type": "OBJECT",
        "properties": {
            "files": {
                "type": "ARRAY",
                "items": {
                    "type": "OBJECT",
                    "properties": {
                        "path": {"type": "STRING", "description": "File name (e.g., 'index.html', 'README.md', 'LICENSE')"},
                        "content": {"type": "STRING", "description": "Full content of the file"}
                    },
                    "required": ["path", "content"]
                }
            }
        },
        "required": ["files"]
    }

    # --- CONSTRUCT THE CONTENTS FIELD ---
    contents = []

    if image_parts:
        # Images first, then the text prompt, in a single multimodal turn.
        all_parts = image_parts + [
            { "text": prompt }
        ]
        contents.append({ "parts": all_parts })
    else:
        # If no images, use the original structure with only the text prompt
        contents.append({ "parts": [{ "text": prompt }] })

    # Construct the final API payload
    payload = {
        "contents": contents,
        "systemInstruction": { "parts": [{ "text": system_prompt }] },
        "generationConfig": {
            "responseMimeType": "application/json",
            "responseSchema": response_schema
        }
    }

    # Use exponential backoff for the API call
    max_retries = 5  # Increased from 3 to 5
    base_delay = 2  # Increased from 1 to 2

    for attempt in range(max_retries):
        try:
            # Construct the URL with the API key
            url = f"{GEMINI_API_URL}?key={GEMINI_API_KEY}"
            # Increased timeout from 60s to 180s for complex tasks
            async with httpx.AsyncClient(timeout=180.0) as client:
                response = await client.post(
                    url,
                    json=payload,
                    headers={"Content-Type": "application/json"}
                )
                response.raise_for_status()  # Raises an exception for 4xx/5xx status codes

                # Parse the response to get the structured JSON text
                result = response.json()

                # Extract the generated JSON string from the result
                json_text = result['candidates'][0]['content']['parts'][0]['text']

                # The LLM output is a JSON string, so we need to parse it into a Python dict
                generated_files = json.loads(json_text)

                print(f"--- [LLM_CALL] Successfully generated files on attempt {attempt + 1}. ---")
                return generated_files

        except httpx.HTTPStatusError as e:
            print(f"--- [LLM_CALL] HTTP Error on attempt {attempt + 1}: {e.response.status_code} - {e.response.text[:500]} ---")
        except KeyError as e:
            # The 'result'/'json_text' locals() probes below report partial
            # progress without risking a NameError when the failure happened
            # before those variables were bound.
            print(f"--- [LLM_CALL] KeyError on attempt {attempt + 1}: Missing expected key {e} in LLM response. ---")
            print(f"--- [LLM_CALL] Full response: {result if 'result' in locals() else 'No response received'} ---")
        except json.JSONDecodeError as e:
            print(f"--- [LLM_CALL] JSON Decode Error on attempt {attempt + 1}: {e} ---")
            print(f"--- [LLM_CALL] Raw LLM output that failed to parse: {json_text[:1000] if 'json_text' in locals() else 'No text extracted'} ---")
        except httpx.RequestError as e:
            # Catches network errors (DNS, connect, read timeouts, etc.)
            print(f"--- [LLM_CALL] Network Error on attempt {attempt + 1}: {type(e).__name__}: {str(e)} ---")

        if attempt < max_retries - 1:
            delay = base_delay * (2 ** attempt)
            print(f"--- [LLM_CALL] Retrying LLM call in {delay} seconds... ---")
            await asyncio.sleep(delay)

    # If all retries fail, we raise an exception which is caught downstream
    print("--- [LLM_CALL] Failed to generate code after multiple retries. ---")
    raise Exception("LLM Code Generation Failure")
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
async def notify_evaluation_server(
    evaluation_url: str,
    email: str,
    task_id: str,
    round_index: int,
    nonce: str,
    repo_url: str,
    commit_sha: str,
    pages_url: str
) -> bool:
    """POST the deployment details to *evaluation_url*.

    Retries up to three times with exponential backoff; returns True on the
    first 2xx response, False once every attempt has failed.
    """
    body = {
        "email": email,
        "task": task_id,
        "round": round_index,
        "nonce": nonce,
        "repo_url": repo_url,
        "commit_sha": commit_sha,
        "pages_url": pages_url
    }

    max_retries = 3
    base_delay = 1

    print(f"--- [NOTIFICATION] Attempting to notify server at {evaluation_url} ---")

    for attempt in range(max_retries):
        try:
            async with httpx.AsyncClient(timeout=10) as client:
                resp = await client.post(evaluation_url, json=body)
                # Treat any 4xx/5xx as a failed attempt.
                resp.raise_for_status()

                print(f"--- [NOTIFICATION] Successfully notified server. Response: {resp.status_code} ---")
                return True
        except httpx.HTTPStatusError as e:
            print(f"--- [NOTIFICATION] HTTP Error on attempt {attempt + 1}: {e}. ---")
        except httpx.RequestError as e:
            print(f"--- [NOTIFICATION] Request Error on attempt {attempt + 1}: {e}. ---")

        # Back off before the next attempt (1s, then 2s).
        if attempt < max_retries - 1:
            delay = base_delay * (2 ** attempt)
            print(f"--- [NOTIFICATION] Retrying in {delay} seconds... ---")
            await asyncio.sleep(delay)

    print(f"--- [NOTIFICATION] Failed to notify evaluation server after {max_retries} attempts. ---")
    return False
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
async def save_attachments_locally(task_dir: str, attachments: list) -> list:
    """Decode Base64 Data-URI attachments and write them into *task_dir*.

    Args:
        task_dir: Existing directory to write the attachment files into.
        attachments: Objects exposing ``.name`` (filename) and ``.url``
            (a ``data:...;base64,...`` URI).

    Returns:
        List of successfully saved file names; invalid/unparseable entries
        are skipped with a warning.

    Raises:
        Exception: if a syntactically valid attachment fails to decode or
        write (the underlying error is chained).

    Fix vs. the original: warning/error messages now include the actual
    filename (they previously printed the literal placeholder "(unknown)").
    """
    saved_files = []
    print(f"--- [ATTACHMENTS] Processing {len(attachments)} attachments for: {task_dir} ---")

    for attachment in attachments:
        filename = attachment.name
        data_uri = attachment.url

        if not filename or not data_uri or not data_uri.startswith("data:"):
            print(f" -> WARNING: Skipping invalid attachment entry: {filename}")
            continue

        # Extract the Base64 payload (everything after "base64,").
        match = re.search(r"base64,(.*)", data_uri, re.IGNORECASE)
        if not match:
            print(f" -> ERROR: Could not find base64 data in URI for {filename}")
            continue

        base64_data = match.group(1)
        file_path = os.path.join(task_dir, filename)

        try:
            # Decode the base64 string and write the raw bytes to disk.
            file_bytes = base64.b64decode(base64_data)
            with open(file_path, "wb") as f:
                f.write(file_bytes)

            print(f" -> Saved Attachment: {filename} (Size: {len(file_bytes)} bytes)")
            saved_files.append(filename)

        except Exception as e:
            print(f" -> CRITICAL ERROR saving attachment {filename}: {e}")
            raise Exception(f"Failed to save attachment {filename} locally.") from e

    return saved_files
|
| 513 |
+
# --- Main Orchestration Logic ---
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
async def generate_files_and_deploy(task_data: TaskRequest):
    """
    Asynchronous background worker that executes the full project workflow:
    set up the GitHub repo (clone or init), generate code via the LLM, save
    generated files and attachments locally, commit/publish to GitHub Pages,
    and notify the evaluation server.

    The LLM prompt is adapted for multi-round tasks (round > 1 becomes an
    "update existing files" instruction), and the repo is set up *before*
    any files are written so a Round 2+ clone cannot clobber fresh output.

    Args:
        task_data: Validated task payload received on /ready.

    Returns:
        None. Failures are caught at the top level (with a full traceback
        printed) so the serving process survives a bad task.
    """
    task_id = task_data.task
    email = task_data.email
    round_index = task_data.round
    brief = task_data.brief
    evaluation_url = task_data.evaluation_url
    nonce = task_data.nonce
    attachments = task_data.attachments

    print(f"\n--- [PROCESS START] Starting background task for {task_id}, Round {round_index} ---")

    # Deployment configuration.
    # NOTE: repo_url_auth embeds the GitHub token — never print or log it.
    repo_name = task_id.replace(' ', '-').lower()
    github_username = settings.GITHUB_USERNAME
    github_token = settings.GITHUB_TOKEN
    repo_url_auth = f"https://{github_username}:{github_token}@github.com/{github_username}/{repo_name}.git"
    repo_url_http = f"https://github.com/{github_username}/{repo_name}"

    try:
        # 0. Setup local directory - use /tmp which is always writable
        base_dir = "/tmp/generated_tasks"
        local_path = os.path.join(base_dir, task_id)

        # --- ROBUST CLEANUP LOGIC ---
        # Crucial: cleans up the local directory before cloning or creating
        # a new repo, so stale files from a previous round cannot leak in.
        if os.path.exists(local_path):
            print(f"--- [CLEANUP] Deleting existing local directory: {local_path} ---")

            def onerror(func, path, exc_info):
                """Error handler for shutil.rmtree to handle permission issues."""
                # Read-only files (common on Windows checkouts) raise
                # PermissionError; make the path writable and retry once.
                if exc_info[0] is PermissionError or 'WinError 5' in str(exc_info[1]):
                    os.chmod(path, stat.S_IWUSR)
                    func(path)
                else:
                    raise

            try:
                shutil.rmtree(local_path, onerror=onerror)
                print("--- [CLEANUP] Directory deleted successfully. ---")
            except Exception as e:
                print(f"!!! CRITICAL: Failed to clean up directory. Error: {e}")
                # Chain the original cause so the traceback stays useful.
                raise RuntimeError(f"Failed to perform local cleanup: {e}") from e

        # Create the fresh, EMPTY directory (ready for clone or init)
        os.makedirs(local_path, exist_ok=True)
        # --- END ROBUST CLEANUP ---

        # 1. SETUP REPO (Clone or Init)
        # MUST run before any files are saved to local_path.
        print(f"--- [DEPLOYMENT] Setting up local Git repository for Round {round_index}... ---")
        repo = await setup_local_repo(
            local_path=local_path,
            repo_name=repo_name,
            repo_url_auth=repo_url_auth,
            repo_url_http=repo_url_http,
            round_index=round_index
        )

        # 2. Process attachments for LLM input: collect decodable image
        # parts for the model, and every attachment name for the prompt.
        image_parts = []
        attachment_list_for_llm_prompt = []

        for attachment in attachments:
            if is_image_data_uri(attachment.url):
                gemini_part = data_uri_to_gemini_part(attachment.url)
                if gemini_part:
                    image_parts.append(gemini_part)
            attachment_list_for_llm_prompt.append(attachment.name)

        print(f"--- [LLM_INPUT] Found {len(image_parts)} image(s) to pass to LLM. ---")

        attachment_list_str = ", ".join(attachment_list_for_llm_prompt)

        # 3. AI code generation — adapt the prompt for Round 2+.
        if round_index > 1:
            # For Round 2+, tell the LLM it's modifying existing work
            llm_prompt = (
                f"UPDATE INSTRUCTION (ROUND {round_index}): You must modify the existing project files "
                f"(index.html, README.md, LICENSE) based on this new brief: '{brief}'. "
                "You must replace all content in 'index.html', 'README.md', and 'LICENSE' with new, complete versions "
                "that implement the requested modifications. The 'index.html' must remain a single, complete, "
                "fully responsive HTML file using Tailwind CSS."
            )
        else:
            # For Round 1, generate a new application
            llm_prompt = (
                f"Generate a complete, single-file HTML web application to achieve the following: {brief}. "
                "Ensure your code is fully responsive, and uses Tailwind CSS. "
                "Provide the code for the main web app, a README.md, and an MIT LICENSE."
            )

        # Add attachment context if files were provided, regardless of round.
        if attachment_list_str:
            llm_prompt += f"\nAdditional context: The following files are available in the project root: {attachment_list_str}. "
            llm_prompt += f"Ensure your code references these files correctly (if applicable)."

        # Call LLM
        generated_files = await call_llm_for_code(llm_prompt, task_id, image_parts)

        # 4. Save generated code locally.
        # This overwrites the cloned files (index.html, README.md, LICENSE).
        await save_generated_files_locally(task_id, generated_files)

        # 5. Save attachments (like data.csv) locally. This happens *after*
        # the clone/init, resolving the Round 2 ordering error.
        await save_attachments_locally(local_path, attachments)

        # 6. COMMIT AND PUBLISH
        print(f"--- [DEPLOYMENT] Committing and Publishing task {task_id}, Round {round_index} to GitHub... ---")

        deployment_info = await commit_and_publish(
            repo=repo,
            task_id=task_id,
            round_index=round_index,
            repo_name=repo_name
        )

        repo_url = deployment_info["repo_url"]
        commit_sha = deployment_info["commit_sha"]
        pages_url = deployment_info["pages_url"]

        print(f"--- [DEPLOYMENT] Success! Repo: {repo_url}, Pages: {pages_url} ---")

        # 7. Notify the Evaluation Server with the deployment details.
        await notify_evaluation_server(
            evaluation_url=evaluation_url,
            email=email,
            task_id=task_id,
            round_index=round_index,
            nonce=nonce,
            repo_url=repo_url,
            commit_sha=commit_sha,
            pages_url=pages_url
        )

    except Exception as e:
        # Top-level guard: a background task must never crash the server.
        # Print the full traceback so failures are diagnosable — the
        # previous version printed only str(e), hiding the failing line.
        import traceback
        print(f"--- [CRITICAL FAILURE] Task {task_id} failed during processing: {e} ---")
        traceback.print_exc()

    print(f"--- [PROCESS END] Background task for {task_id} completed. ---")
|
| 667 |
+
|
| 668 |
+
|
| 669 |
+
# --- FastAPI Endpoint ---
|
| 670 |
+
|
| 671 |
+
# Strong references to in-flight background tasks. The asyncio event loop
# keeps only weak references to tasks, so without this set a deployment task
# could be garbage-collected before it finishes (see asyncio.create_task docs).
_background_tasks = set()


@app.post("/ready", status_code=200)
async def receive_task(task_data: TaskRequest):
    """
    API endpoint that receives the task payload.

    Verifies the shared secret, records the payload for /status, and starts
    the generation/deployment process in the background so the evaluation
    server receives an immediate 200 OK.

    Raises:
        HTTPException: 401 when the secret does not match the configured one.
    """
    global received_task_data

    # 1. SECRET VERIFICATION (CRITICAL PROJECT REQUIREMENT)
    if not verify_secret(task_data.secret):
        print(f"--- FAILED SECRET VERIFICATION for task {task_data.task} ---")
        raise HTTPException(
            status_code=401,
            detail="Unauthorized: Secret does not match configured student secret."
        )

    # Store data and print initial confirmation.
    # NOTE(review): .dict() is the pydantic v1 API; on pydantic v2 prefer
    # task_data.model_dump() (.dict() survives there only as a deprecated alias).
    received_task_data = task_data.dict()

    print("--- TASK RECEIVED SUCCESSFULLY ---")
    print(f"Task ID: {received_task_data['task']}, Round: {received_task_data['round']}")

    # Start the processing function in the background, keeping a strong
    # reference so the task cannot be garbage-collected mid-execution.
    task = asyncio.create_task(generate_files_and_deploy(task_data))
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)

    # Respond immediately with 200 OK to the evaluation server
    return JSONResponse(
        status_code=200,
        content={"status": "ready", "message": f"Task {task_data.task} received and processing started."}
    )
|
| 701 |
+
|
| 702 |
+
@app.get("/")
async def root():
    """Landing endpoint: confirms the service is alive and points at /ready."""
    payload = {"message": "Task Receiver Service is running. Post to /ready to submit a task."}
    return payload
|
| 705 |
+
|
| 706 |
+
@app.get("/status")
async def get_status():
    """Report the most recently received task payload, if any."""
    global received_task_data
    if not received_task_data:
        return {"message": "Awaiting first task submission to /ready"}
    # Reflects only the last accepted request, not the live progress of the
    # background task it spawned.
    return {"last_received_task": received_task_data}
|
models.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field, EmailStr
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
# Data model for attachments (like the sample image)
|
| 5 |
+
class Attachment(BaseModel):
    """A single task attachment delivered inline as a data URI.

    Image attachments may additionally be forwarded to the LLM as vision
    input; all attachment names are listed in the generation prompt.
    """

    # Original file name, used when writing the attachment to disk.
    name: str = Field(..., description="Name of the attached file (e.g., 'sample.png')")
    # Full data URI carrying the file contents (base64-encoded payload).
    url: str = Field(..., description="The content encoded as a data URI (data:image/png;base64,...)")
|
| 8 |
+
|
| 9 |
+
# The main data model for the incoming task payload
|
| 10 |
+
class TaskRequest(BaseModel):
    """Validated payload POSTed by the evaluation server to /ready.

    Every field is required (``...``); pydantic rejects requests that omit
    any of them, and ``EmailStr`` enforces a syntactically valid email.
    """

    # Student email ID
    email: EmailStr = Field(..., description="Student email ID")
    # Student-provided secret; compared against the configured secret before
    # any processing starts.
    secret: str = Field(..., description="Student-provided secret")
    # A unique task ID.
    task: str = Field(..., description="A unique task ID (e.g., 'captcha-solver-...')")
    # There will be multiple rounds per task. This is the round index
    round: int = Field(..., description="The round index (e.g., 1)")
    # Pass this nonce back to the evaluation URL
    nonce: str = Field(..., description="Pass this nonce back to the evaluation URL below")
    # brief: mentions what the app needs to do
    brief: str = Field(..., description="Brief description of what the app needs to do")
    # checks: mention how it will be evaluated
    checks: List[str] = Field(..., description="Evaluation checks (e.g., license, readme quality)")
    # Send repo & commit details to the URL below
    evaluation_url: str = Field(..., description="URL to send repo & commit details")
    # Attachments will be encoded as data URIs
    attachments: List[Attachment] = Field(..., description="Attachments encoded as data URIs")
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# from pydantic import BaseModel, EmailStr
|
| 33 |
+
# from typing import List, Optional
|
| 34 |
+
|
| 35 |
+
# # Defines the structure for an individual attachment, like a sample captcha image
|
| 36 |
+
# class Attachment(BaseModel):
|
| 37 |
+
# """
|
| 38 |
+
# Represents an attachment provided in the task payload.
|
| 39 |
+
# The 'url' is expected to be a data URI (e.g., base64 encoded image).
|
| 40 |
+
# """
|
| 41 |
+
# name: str
|
| 42 |
+
# url: str
|
| 43 |
+
|
| 44 |
+
# # Defines the complete structure of the JSON request body
|
| 45 |
+
# class TaskRequest(BaseModel):
|
| 46 |
+
# """
|
| 47 |
+
# The main model representing the task request sent by the evaluation server.
|
| 48 |
+
# """
|
| 49 |
+
# email: EmailStr # Enforces a valid email format
|
| 50 |
+
# secret: str
|
| 51 |
+
# task: str
|
| 52 |
+
# round: int
|
| 53 |
+
# nonce: str
|
| 54 |
+
# brief: str
|
| 55 |
+
# checks: List[str] # A list of strings detailing the evaluation checks
|
| 56 |
+
# evaluation_url: str
|
| 57 |
+
# attachments: List[Attachment] # A list of Attachment objects
|
| 58 |
+
|
| 59 |
+
# # Configuration for Pydantic to allow validation from dicts/JSON
|
| 60 |
+
# class Config:
|
| 61 |
+
# schema_extra = {
|
| 62 |
+
# "example": {
|
| 63 |
+
# "email": "student@example.com",
|
| 64 |
+
# "secret": "my-secure-token",
|
| 65 |
+
# "task": "captcha-solver-12345",
|
| 66 |
+
# "round": 1,
|
| 67 |
+
# "nonce": "ab12-cd34-ef56",
|
| 68 |
+
# "brief": "Create a captcha solver that handles ?url=https://.../image.png.",
|
| 69 |
+
# "checks": [
|
| 70 |
+
# "Repo has MIT license",
|
| 71 |
+
# "README.md is professional"
|
| 72 |
+
# ],
|
| 73 |
+
# "evaluation_url": "https://example.com/notify",
|
| 74 |
+
# "attachments": [
|
| 75 |
+
# {
|
| 76 |
+
# "name": "sample.png",
|
| 77 |
+
# "url": "data:image/png;base64,iVBORw..."
|
| 78 |
+
# }
|
| 79 |
+
# ]
|
| 80 |
+
# }
|
| 81 |
+
# }
|
requirements.txt
ADDED
|
Binary file (216 Bytes). View file
|
|
|