Spaces:
Runtime error
Runtime error
Upload 7 files
Browse filesfirst attempt of uploading
- README.md +20 -13
- __init__.py +1 -0
- app.py +43 -0
- generators.py +19 -0
- planner.py +21 -0
- requirements.txt +5 -0
- searcher.py +19 -0
README.md
CHANGED
|
@@ -1,13 +1,20 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Course Creator Space
|
| 2 |
+
|
| 3 |
+
This private space allows you to plan and generate a full course package from a chat conversation with integrated web research.
|
| 4 |
+
|
| 5 |
+
## Features
|
| 6 |
+
|
| 7 |
+
- Chat interface to collect the user's course idea
|
| 8 |
+
- Web research using Tavily API (or SerpAPI if configured)
|
| 9 |
+
- Course planning using the OpenAI Chat API
|
| 10 |
+
- Course package generation with a plan and source citations compressed into a zip file
|
| 11 |
+
|
| 12 |
+
## Setup
|
| 13 |
+
|
| 14 |
+
1. Add your API keys in the Space repository secrets:
|
| 15 |
+
- `OPENAI_API_KEY`
|
| 16 |
+
- `TAVILY_API_KEY`
|
| 17 |
+
- optionally set `OPENAI_MODEL`, `TEMPERATURE`, `MAX_OUTPUT_TOKENS`
|
| 18 |
+
2. Install dependencies with `pip install -r requirements.txt` (done automatically on Spaces).
|
| 19 |
+
3. Run the Gradio app with `python app.py`.
|
| 20 |
+
|
__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# course_agent package
|
app.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import gradio as gr
|
| 3 |
+
from course_agent.planner import plan_course
|
| 4 |
+
from course_agent.generators import generate_course_zip
|
| 5 |
+
from course_agent.searcher import run_web_search
|
| 6 |
+
|
| 7 |
+
# store chat messages and sources globally
|
| 8 |
+
chat_history = []
|
| 9 |
+
sources = []
|
| 10 |
+
|
| 11 |
+
def handle_user_message(message):
|
| 12 |
+
global chat_history
|
| 13 |
+
chat_history.append({"role": "user", "content": message})
|
| 14 |
+
# simple display of conversation
|
| 15 |
+
return "\n\n".join([m["role"] + ': ' + m["content"] for m in chat_history])
|
| 16 |
+
|
| 17 |
+
def handle_search(query, num_results=5, domain_filter=""):
    """Run a web search, append the hits to the shared source list, and
    return a one-line-per-result summary for display."""
    global sources
    hits = run_web_search(query, num_results=num_results, domain_filter=domain_filter)
    sources.extend(hits)
    # Summarize each hit as "title - url"; missing fields render empty.
    summary = []
    for hit in hits:
        summary.append(f"{hit.get('title','')} - {hit.get('url','')}")
    return "\n".join(summary)
|
| 23 |
+
|
| 24 |
+
def handle_generate():
    """Plan the course from the accumulated chat and sources, then build
    the zipped course package and return its filesystem path."""
    course_plan = plan_course(chat_history, sources)
    return generate_course_zip(course_plan, sources)
|
| 28 |
+
|
| 29 |
+
# Gradio UI: wire the chat, search, and generate controls to the handlers.
with gr.Blocks() as demo:
    gr.Markdown("# Course Agent\nChat about your course idea, search for sources, and generate a course package.")
    message = gr.Textbox(label="Your message")
    chat_display = gr.Textbox(label="Conversation", interactive=False)
    search_query = gr.Textbox(label="Search query")
    domain_filter = gr.Textbox(label="Domain filter (comma-separated)", placeholder=".edu,.gov,acm.org")
    search_results = gr.Textbox(label="Search results", interactive=False)
    generate_btn = gr.Button("Generate Course Package")
    file_output = gr.File(label="course.zip")

    message.submit(handle_user_message, inputs=message, outputs=chat_display)
    # Bug fix: the domain_filter textbox existed but was never passed to the
    # handler. Bind it by keyword — wiring it positionally as the second
    # input would have landed in num_results instead.
    search_query.submit(
        lambda query, domains: handle_search(query, domain_filter=domains),
        inputs=[search_query, domain_filter],
        outputs=search_results,
    )
    generate_btn.click(handle_generate, outputs=file_output)

demo.launch()
|
generators.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import zipfile
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def generate_course_zip(plan, sources, out_dir="/tmp/course_agent"):
    """Write the course plan and its sources to disk and bundle them in a zip.

    Args:
        plan: Course plan text, stored as ``plan.txt``.
        sources: JSON-serializable list of source records, stored as
            ``sources.json``.
        out_dir: Directory for the intermediate files and the archive;
            created if missing.

    Returns:
        Path of the generated ``course.zip`` inside ``out_dir``.
    """
    os.makedirs(out_dir, exist_ok=True)
    plan_path = os.path.join(out_dir, "plan.txt")
    # Explicit UTF-8 so output does not depend on the host locale encoding.
    with open(plan_path, "w", encoding="utf-8") as f:
        f.write(plan)
    sources_path = os.path.join(out_dir, "sources.json")
    with open(sources_path, "w", encoding="utf-8") as f:
        # ensure_ascii=False keeps non-ASCII titles readable; indent for humans.
        json.dump(sources, f, ensure_ascii=False, indent=2)
    zip_path = os.path.join(out_dir, "course.zip")
    # ZIP_DEFLATED actually compresses; ZipFile's default ZIP_STORED only archives.
    with zipfile.ZipFile(zip_path, "w", compression=zipfile.ZIP_DEFLATED) as zf:
        zf.write(plan_path, arcname="plan.txt")
        zf.write(sources_path, arcname="sources.json")
    return zip_path
|
planner.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import openai
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def plan_course(messages, sources):
    """Ask the OpenAI chat API for a structured course plan.

    Args:
        messages: Conversation as a list of ``{"role", "content"}`` dicts.
        sources: Web-search result dicts; ``title``/``url`` keys are used
            when present.

    Returns:
        The plan text from the model's first completion choice.
    """
    # Bug fix: openai>=1.0 (pinned in requirements.txt) removed the
    # module-level openai.ChatCompletion API; use the v1 client instead.
    client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    system_prompt = "You are an expert course planner. Use the conversation and sources to propose a structured plan."
    # Bug fix: the prompt told the model to use the sources, but the
    # original never included them. Append them to the system prompt.
    if sources:
        source_list = "\n".join(
            f"- {s.get('title', '')}: {s.get('url', '')}" for s in sources
        )
        system_prompt += "\n\nSources:\n" + source_list
    formatted_messages = [{"role": "system", "content": system_prompt}]
    formatted_messages.extend(messages)
    response = client.chat.completions.create(
        model=os.getenv("OPENAI_MODEL", "gpt-5-mini"),
        messages=formatted_messages,
        temperature=float(os.getenv("TEMPERATURE", "0.7")),
        max_tokens=int(os.getenv("MAX_OUTPUT_TOKENS", "2048")),
    )
    return response.choices[0].message.content
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio>=3.0.0
|
| 2 |
+
openai>=1.0.0
|
| 3 |
+
tavily-python>=0.3.0
|
| 4 |
+
pydantic>=1.10.0
|
| 5 |
+
python-dotenv>=1.0.0
|
searcher.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def run_web_search(query, num_results=5, domain_filter=""):
    """Search the web with the Tavily API.

    Args:
        query: Free-text search query.
        num_results: Maximum number of results to request.
        domain_filter: Optional comma-separated list of domains to restrict
            the search to (e.g. ``".edu,.gov"``).

    Returns:
        List of result dicts (each with keys such as ``title`` and ``url``),
        matching what the callers iterate over.

    Raises:
        ImportError: If ``tavily-python`` is not installed.
        ValueError: If ``TAVILY_API_KEY`` is not set.
    """
    try:
        from tavily import TavilyClient
    except ImportError as exc:
        raise ImportError("Please install tavily-python") from exc
    api_key = os.getenv("TAVILY_API_KEY")
    if not api_key:
        raise ValueError("TAVILY_API_KEY environment variable is required")
    client = TavilyClient(api_key=api_key)
    # Bug fix: TavilyClient.search() takes max_results / include_domains;
    # the original's "num" and "search_kwargs" keywords raise TypeError.
    params = {"max_results": num_results}
    if domain_filter:
        params["include_domains"] = [
            d.strip() for d in domain_filter.split(",") if d.strip()
        ]
    response = client.search(query, **params)
    # Bug fix: return the result list, not the full response envelope —
    # callers iterate results and call .get() on each item.
    return response.get("results", [])
|