Upload folder using huggingface_hub
Browse files- README.md +3 -3
- ankigen_core/agents/base.py +10 -5
- ankigen_core/agents/config.py +5 -3
- ankigen_core/agents/integration.py +18 -1
- ankigen_core/agents/schemas.py +1 -1
- ankigen_core/agents/token_tracker.py +1 -1
- ankigen_core/auto_config.py +4 -3
- ankigen_core/card_generator.py +42 -11
- ankigen_core/cli.py +2 -2
- ankigen_core/llm_interface.py +3 -3
- app.py +252 -165
- requirements.txt +72 -67
README.md
CHANGED
|
@@ -5,7 +5,7 @@ app_file: app.py
|
|
| 5 |
requirements: requirements.txt
|
| 6 |
python_version: 3.12
|
| 7 |
sdk: gradio
|
| 8 |
-
sdk_version: 6.
|
| 9 |
---
|
| 10 |
|
| 11 |
# AnkiGen - Anki Card Generator
|
|
@@ -77,7 +77,7 @@ uv run python -m ankigen_core.cli -p "Python Lists" --no-confirm
|
|
| 77 |
- `-p, --prompt`: Subject/topic (required)
|
| 78 |
- `--topics`: Number of topics (auto-detected if omitted)
|
| 79 |
- `--cards-per-topic`: Cards per topic (auto-detected if omitted)
|
| 80 |
-
- `--model`: Model choice (`gpt-
|
| 81 |
- `-o, --output`: Output file path
|
| 82 |
- `--format`: Export format (`apkg` or `csv`)
|
| 83 |
- `--no-confirm`: Skip confirmation prompt
|
|
@@ -134,4 +134,4 @@ BSD 2-Clause License
|
|
| 134 |
|
| 135 |
- Gradio library for the web interface
|
| 136 |
- OpenAI for GPT models
|
| 137 |
-
- Card design principles from ["An Opinionated Guide to Using Anki Correctly"](https://www.lesswrong.com/posts/7Q7DPSk4iGFJd8DRk/an-opinionated-guide-to-using-anki-correctly)
|
|
|
|
| 5 |
requirements: requirements.txt
|
| 6 |
python_version: 3.12
|
| 7 |
sdk: gradio
|
| 8 |
+
sdk_version: 6.1.0
|
| 9 |
---
|
| 10 |
|
| 11 |
# AnkiGen - Anki Card Generator
|
|
|
|
| 77 |
- `-p, --prompt`: Subject/topic (required)
|
| 78 |
- `--topics`: Number of topics (auto-detected if omitted)
|
| 79 |
- `--cards-per-topic`: Cards per topic (auto-detected if omitted)
|
| 80 |
+
- `--model`: Model choice (`gpt-5.2-auto`, `gpt-5.2-instant`, or `gpt-5.2-thinking`)
|
| 81 |
- `-o, --output`: Output file path
|
| 82 |
- `--format`: Export format (`apkg` or `csv`)
|
| 83 |
- `--no-confirm`: Skip confirmation prompt
|
|
|
|
| 134 |
|
| 135 |
- Gradio library for the web interface
|
| 136 |
- OpenAI for GPT models
|
| 137 |
+
- Card design principles from ["An Opinionated Guide to Using Anki Correctly"](https://www.lesswrong.com/posts/7Q7DPSk4iGFJd8DRk/an-opinionated-guide-to-using-anki-correctly)
|
ankigen_core/agents/base.py
CHANGED
|
@@ -36,7 +36,8 @@ class AgentConfig:
|
|
| 36 |
|
| 37 |
name: str
|
| 38 |
instructions: str
|
| 39 |
-
model: str = "gpt-5.
|
|
|
|
| 40 |
temperature: float = 0.7
|
| 41 |
max_tokens: Optional[int] = None
|
| 42 |
timeout: float = 30.0
|
|
@@ -67,17 +68,21 @@ class BaseAgentWrapper:
|
|
| 67 |
|
| 68 |
set_default_openai_client(self.openai_client, use_for_tracing=False)
|
| 69 |
|
| 70 |
-
# Create model settings with temperature and
|
| 71 |
model_settings_kwargs = {"temperature": self.config.temperature}
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
-
# GPT-5.
|
| 74 |
if (
|
| 75 |
-
|
|
|
|
| 76 |
and "chat-latest" not in self.config.model
|
| 77 |
):
|
| 78 |
from openai.types.shared import Reasoning
|
| 79 |
|
| 80 |
-
model_settings_kwargs["reasoning"] = Reasoning(effort=
|
| 81 |
|
| 82 |
model_settings = ModelSettings(**model_settings_kwargs)
|
| 83 |
|
|
|
|
| 36 |
|
| 37 |
name: str
|
| 38 |
instructions: str
|
| 39 |
+
model: str = "gpt-5.2"
|
| 40 |
+
reasoning_effort: Optional[str] = None
|
| 41 |
temperature: float = 0.7
|
| 42 |
max_tokens: Optional[int] = None
|
| 43 |
timeout: float = 30.0
|
|
|
|
| 68 |
|
| 69 |
set_default_openai_client(self.openai_client, use_for_tracing=False)
|
| 70 |
|
| 71 |
+
# Create model settings with temperature and optional reasoning effort
|
| 72 |
model_settings_kwargs = {"temperature": self.config.temperature}
|
| 73 |
+
effort = self.config.reasoning_effort
|
| 74 |
+
if effort in ("auto", "", None):
|
| 75 |
+
effort = None
|
| 76 |
|
| 77 |
+
# GPT-5.x (not chat-latest) supports reasoning_effort
|
| 78 |
if (
|
| 79 |
+
effort
|
| 80 |
+
and self.config.model.startswith("gpt-5")
|
| 81 |
and "chat-latest" not in self.config.model
|
| 82 |
):
|
| 83 |
from openai.types.shared import Reasoning
|
| 84 |
|
| 85 |
+
model_settings_kwargs["reasoning"] = Reasoning(effort=effort)
|
| 86 |
|
| 87 |
model_settings = ModelSettings(**model_settings_kwargs)
|
| 88 |
|
ankigen_core/agents/config.py
CHANGED
|
@@ -91,7 +91,7 @@ class AgentConfigManager:
|
|
| 91 |
|
| 92 |
# Default models for each agent type
|
| 93 |
default_models = {
|
| 94 |
-
"subject_expert_model": "gpt-5.
|
| 95 |
}
|
| 96 |
|
| 97 |
# Simple mapping: agent_name -> agent_name_model
|
|
@@ -111,7 +111,8 @@ class AgentConfigManager:
|
|
| 111 |
config = AgentConfig(
|
| 112 |
name=agent_data.get("name", agent_name),
|
| 113 |
instructions=agent_data.get("instructions", ""),
|
| 114 |
-
model=agent_data.get("model", "gpt-5.
|
|
|
|
| 115 |
temperature=agent_data.get("temperature", 0.7),
|
| 116 |
max_tokens=agent_data.get("max_tokens"),
|
| 117 |
timeout=agent_data.get("timeout", 30.0),
|
|
@@ -176,7 +177,8 @@ class AgentConfigManager:
|
|
| 176 |
config = AgentConfig(
|
| 177 |
name=agent_name,
|
| 178 |
instructions=agent_data.get("instructions", ""),
|
| 179 |
-
model=agent_data.get("model", "gpt-5.
|
|
|
|
| 180 |
temperature=agent_data.get("temperature", 0.7),
|
| 181 |
max_tokens=agent_data.get("max_tokens"),
|
| 182 |
timeout=agent_data.get("timeout", 30.0),
|
|
|
|
| 91 |
|
| 92 |
# Default models for each agent type
|
| 93 |
default_models = {
|
| 94 |
+
"subject_expert_model": "gpt-5.2",
|
| 95 |
}
|
| 96 |
|
| 97 |
# Simple mapping: agent_name -> agent_name_model
|
|
|
|
| 111 |
config = AgentConfig(
|
| 112 |
name=agent_data.get("name", agent_name),
|
| 113 |
instructions=agent_data.get("instructions", ""),
|
| 114 |
+
model=agent_data.get("model", "gpt-5.2"),
|
| 115 |
+
reasoning_effort=agent_data.get("reasoning_effort"),
|
| 116 |
temperature=agent_data.get("temperature", 0.7),
|
| 117 |
max_tokens=agent_data.get("max_tokens"),
|
| 118 |
timeout=agent_data.get("timeout", 30.0),
|
|
|
|
| 177 |
config = AgentConfig(
|
| 178 |
name=agent_name,
|
| 179 |
instructions=agent_data.get("instructions", ""),
|
| 180 |
+
model=agent_data.get("model", "gpt-5.2"),
|
| 181 |
+
reasoning_effort=agent_data.get("reasoning_effort"),
|
| 182 |
temperature=agent_data.get("temperature", 0.7),
|
| 183 |
max_tokens=agent_data.get("max_tokens"),
|
| 184 |
timeout=agent_data.get("timeout", 30.0),
|
ankigen_core/agents/integration.py
CHANGED
|
@@ -21,7 +21,12 @@ class AgentOrchestrator:
|
|
| 21 |
|
| 22 |
self.subject_expert = None
|
| 23 |
|
| 24 |
-
async def initialize(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
"""Initialize the agent system"""
|
| 26 |
try:
|
| 27 |
# Initialize OpenAI client
|
|
@@ -29,6 +34,7 @@ class AgentOrchestrator:
|
|
| 29 |
self.openai_client = self.client_manager.get_client()
|
| 30 |
|
| 31 |
# Set up model overrides if provided
|
|
|
|
| 32 |
if model_overrides:
|
| 33 |
from ankigen_core.agents.config import get_config_manager
|
| 34 |
|
|
@@ -36,6 +42,17 @@ class AgentOrchestrator:
|
|
| 36 |
config_manager.update_models(model_overrides)
|
| 37 |
logger.info(f"Applied model overrides: {model_overrides}")
|
| 38 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
logger.info("Agent system initialized successfully (simplified pipeline)")
|
| 40 |
|
| 41 |
except Exception as e:
|
|
|
|
| 21 |
|
| 22 |
self.subject_expert = None
|
| 23 |
|
| 24 |
+
async def initialize(
|
| 25 |
+
self,
|
| 26 |
+
api_key: str,
|
| 27 |
+
model_overrides: Dict[str, str] = None,
|
| 28 |
+
reasoning_overrides: Dict[str, Optional[str]] = None,
|
| 29 |
+
):
|
| 30 |
"""Initialize the agent system"""
|
| 31 |
try:
|
| 32 |
# Initialize OpenAI client
|
|
|
|
| 34 |
self.openai_client = self.client_manager.get_client()
|
| 35 |
|
| 36 |
# Set up model overrides if provided
|
| 37 |
+
config_manager = None
|
| 38 |
if model_overrides:
|
| 39 |
from ankigen_core.agents.config import get_config_manager
|
| 40 |
|
|
|
|
| 42 |
config_manager.update_models(model_overrides)
|
| 43 |
logger.info(f"Applied model overrides: {model_overrides}")
|
| 44 |
|
| 45 |
+
if reasoning_overrides:
|
| 46 |
+
if config_manager is None:
|
| 47 |
+
from ankigen_core.agents.config import get_config_manager
|
| 48 |
+
|
| 49 |
+
config_manager = get_config_manager()
|
| 50 |
+
for agent_name, effort in reasoning_overrides.items():
|
| 51 |
+
config_manager.update_agent_config(
|
| 52 |
+
agent_name, reasoning_effort=effort
|
| 53 |
+
)
|
| 54 |
+
logger.info(f"Applied reasoning overrides: {reasoning_overrides}")
|
| 55 |
+
|
| 56 |
logger.info("Agent system initialized successfully (simplified pipeline)")
|
| 57 |
|
| 58 |
except Exception as e:
|
ankigen_core/agents/schemas.py
CHANGED
|
@@ -173,7 +173,7 @@ class AutoConfigSchema(BaseModel):
|
|
| 173 |
)
|
| 174 |
model_choice: str = Field(
|
| 175 |
...,
|
| 176 |
-
description="Recommended model: 'gpt-5.
|
| 177 |
)
|
| 178 |
|
| 179 |
# Analysis metadata
|
|
|
|
| 173 |
)
|
| 174 |
model_choice: str = Field(
|
| 175 |
...,
|
| 176 |
+
description="Recommended model: 'gpt-5.2-auto', 'gpt-5.2-instant', or 'gpt-5.2-thinking'",
|
| 177 |
)
|
| 178 |
|
| 179 |
# Analysis metadata
|
ankigen_core/agents/token_tracker.py
CHANGED
|
@@ -48,7 +48,7 @@ class TokenTracker:
|
|
| 48 |
|
| 49 |
Args:
|
| 50 |
messages: List of message dicts (each with 'role', 'content', optional 'name')
|
| 51 |
-
model: OpenAI model identifier (e.g., 'gpt-
|
| 52 |
|
| 53 |
Returns:
|
| 54 |
Total tokens required to send these messages to the model
|
|
|
|
| 48 |
|
| 49 |
Args:
|
| 50 |
messages: List of message dicts (each with 'role', 'content', optional 'name')
|
| 51 |
+
model: OpenAI model identifier (e.g., 'gpt-5.2', 'gpt-4o')
|
| 52 |
|
| 53 |
Returns:
|
| 54 |
Total tokens required to send these messages to the model
|
ankigen_core/auto_config.py
CHANGED
|
@@ -53,7 +53,8 @@ Consider:
|
|
| 53 |
4. Determine content type: concepts (theory/understanding), syntax (code/commands), api (library usage), practical (hands-on skills)
|
| 54 |
5. TOPIC DECOMPOSITION: Break down the subject into distinct subtopics that together provide comprehensive coverage
|
| 55 |
6. Recommend cloze cards for syntax/code, basic cards for concepts
|
| 56 |
-
7. Choose model based on complexity: gpt-
|
|
|
|
| 57 |
|
| 58 |
TOPIC DECOMPOSITION (topics_list):
|
| 59 |
You MUST provide a topics_list - a list of distinct subtopics that together cover the subject comprehensively.
|
|
@@ -96,7 +97,7 @@ Provide a brief rationale for your choices."""
|
|
| 96 |
try:
|
| 97 |
config = await structured_agent_call(
|
| 98 |
openai_client=openai_client,
|
| 99 |
-
model="gpt-5.
|
| 100 |
instructions=system_prompt,
|
| 101 |
user_input=user_prompt,
|
| 102 |
output_type=AutoConfigSchema,
|
|
@@ -128,7 +129,7 @@ Provide a brief rationale for your choices."""
|
|
| 128 |
cards_per_topic=8,
|
| 129 |
learning_preferences="Focus on fundamental concepts and core principles with practical examples",
|
| 130 |
generate_cloze=False,
|
| 131 |
-
model_choice="gpt-5.
|
| 132 |
subject_type="concepts",
|
| 133 |
scope="medium",
|
| 134 |
rationale="Using default settings due to analysis error",
|
|
|
|
| 53 |
4. Determine content type: concepts (theory/understanding), syntax (code/commands), api (library usage), practical (hands-on skills)
|
| 54 |
5. TOPIC DECOMPOSITION: Break down the subject into distinct subtopics that together provide comprehensive coverage
|
| 55 |
6. Recommend cloze cards for syntax/code, basic cards for concepts
|
| 56 |
+
7. Choose model based on complexity: gpt-5.2-thinking for complex topics, gpt-5.2-instant for basic/simple, gpt-5.2-auto for mixed scope
|
| 57 |
+
- Valid model_choice values: "gpt-5.2-auto", "gpt-5.2-instant", "gpt-5.2-thinking"
|
| 58 |
|
| 59 |
TOPIC DECOMPOSITION (topics_list):
|
| 60 |
You MUST provide a topics_list - a list of distinct subtopics that together cover the subject comprehensively.
|
|
|
|
| 97 |
try:
|
| 98 |
config = await structured_agent_call(
|
| 99 |
openai_client=openai_client,
|
| 100 |
+
model="gpt-5.2",
|
| 101 |
instructions=system_prompt,
|
| 102 |
user_input=user_prompt,
|
| 103 |
output_type=AutoConfigSchema,
|
|
|
|
| 129 |
cards_per_topic=8,
|
| 130 |
learning_preferences="Focus on fundamental concepts and core principles with practical examples",
|
| 131 |
generate_cloze=False,
|
| 132 |
+
model_choice="gpt-5.2-auto",
|
| 133 |
subject_type="concepts",
|
| 134 |
scope="medium",
|
| 135 |
rationale="Using default settings due to analysis error",
|
ankigen_core/card_generator.py
CHANGED
|
@@ -30,19 +30,19 @@ logger.info("Agent system loaded successfully")
|
|
| 30 |
# --- Constants --- (Moved from app.py)
|
| 31 |
AVAILABLE_MODELS = [
|
| 32 |
{
|
| 33 |
-
"value": "gpt-5.
|
| 34 |
-
"label": "GPT-5.
|
| 35 |
-
"description": "
|
| 36 |
},
|
| 37 |
{
|
| 38 |
-
"value": "gpt-
|
| 39 |
-
"label": "GPT-
|
| 40 |
-
"description": "
|
| 41 |
},
|
| 42 |
{
|
| 43 |
-
"value": "gpt-
|
| 44 |
-
"label": "GPT-
|
| 45 |
-
"description": "
|
| 46 |
},
|
| 47 |
]
|
| 48 |
|
|
@@ -60,6 +60,32 @@ GENERATION_MODES = [
|
|
| 60 |
# Legacy functions removed - all card generation now handled by agent system
|
| 61 |
|
| 62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
def _map_generation_mode_to_subject(generation_mode: str, subject: str) -> str:
|
| 64 |
"""Map UI generation mode to agent subject."""
|
| 65 |
if generation_mode == "subject":
|
|
@@ -145,8 +171,13 @@ async def orchestrate_card_generation(
|
|
| 145 |
token_tracker = get_token_tracker()
|
| 146 |
orchestrator = AgentOrchestrator(client_manager)
|
| 147 |
|
| 148 |
-
|
| 149 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 150 |
|
| 151 |
agent_subject = _map_generation_mode_to_subject(generation_mode, subject)
|
| 152 |
context = _build_generation_context(generation_mode, source_text)
|
|
|
|
| 30 |
# --- Constants --- (Moved from app.py)
|
| 31 |
AVAILABLE_MODELS = [
|
| 32 |
{
|
| 33 |
+
"value": "gpt-5.2-auto",
|
| 34 |
+
"label": "GPT-5.2 Auto",
|
| 35 |
+
"description": "Adaptive reasoning",
|
| 36 |
},
|
| 37 |
{
|
| 38 |
+
"value": "gpt-5.2-instant",
|
| 39 |
+
"label": "GPT-5.2 Instant",
|
| 40 |
+
"description": "Fast, minimal reasoning",
|
| 41 |
},
|
| 42 |
{
|
| 43 |
+
"value": "gpt-5.2-thinking",
|
| 44 |
+
"label": "GPT-5.2 Thinking",
|
| 45 |
+
"description": "Higher reasoning effort",
|
| 46 |
},
|
| 47 |
]
|
| 48 |
|
|
|
|
| 60 |
# Legacy functions removed - all card generation now handled by agent system
|
| 61 |
|
| 62 |
|
| 63 |
+
def _parse_model_selection(model_selection: str) -> tuple[str, str | None]:
|
| 64 |
+
"""Parse model selection into model name and reasoning effort."""
|
| 65 |
+
if not model_selection:
|
| 66 |
+
return "gpt-5.2", None
|
| 67 |
+
|
| 68 |
+
normalized = model_selection.strip().lower()
|
| 69 |
+
if normalized == "gpt-5.2-auto":
|
| 70 |
+
return "gpt-5.2", None
|
| 71 |
+
if normalized == "gpt-5.2-instant":
|
| 72 |
+
return "gpt-5.2", "none"
|
| 73 |
+
if normalized == "gpt-5.2-thinking":
|
| 74 |
+
return "gpt-5.2", "high"
|
| 75 |
+
|
| 76 |
+
if "gpt-5.2" in normalized:
|
| 77 |
+
if "instant" in normalized:
|
| 78 |
+
return "gpt-5.2", "none"
|
| 79 |
+
if "thinking" in normalized:
|
| 80 |
+
return "gpt-5.2", "high"
|
| 81 |
+
if "auto" in normalized:
|
| 82 |
+
return "gpt-5.2", None
|
| 83 |
+
return "gpt-5.2", None
|
| 84 |
+
|
| 85 |
+
# Fallback for direct model names
|
| 86 |
+
return model_selection, None
|
| 87 |
+
|
| 88 |
+
|
| 89 |
def _map_generation_mode_to_subject(generation_mode: str, subject: str) -> str:
|
| 90 |
"""Map UI generation mode to agent subject."""
|
| 91 |
if generation_mode == "subject":
|
|
|
|
| 171 |
token_tracker = get_token_tracker()
|
| 172 |
orchestrator = AgentOrchestrator(client_manager)
|
| 173 |
|
| 174 |
+
model_name_resolved, reasoning_effort = _parse_model_selection(model_name)
|
| 175 |
+
logger.info(f"Using {model_name_resolved} for SubjectExpertAgent")
|
| 176 |
+
await orchestrator.initialize(
|
| 177 |
+
api_key_input,
|
| 178 |
+
{"subject_expert": model_name_resolved},
|
| 179 |
+
{"subject_expert": reasoning_effort},
|
| 180 |
+
)
|
| 181 |
|
| 182 |
agent_subject = _map_generation_mode_to_subject(generation_mode, subject)
|
| 183 |
context = _build_generation_context(generation_mode, source_text)
|
ankigen_core/cli.py
CHANGED
|
@@ -143,7 +143,7 @@ async def generate_cards_from_config(
|
|
| 143 |
generation_mode="subject",
|
| 144 |
source_text="",
|
| 145 |
url_input="",
|
| 146 |
-
model_name=config.get("model_choice", "gpt-5.
|
| 147 |
topic_number=config.get("topic_number", 3),
|
| 148 |
cards_per_topic=config.get("cards_per_topic", 5),
|
| 149 |
preference_prompt=config.get("preference_prompt", ""),
|
|
@@ -221,7 +221,7 @@ def export_cards(
|
|
| 221 |
@click.option(
|
| 222 |
"--model",
|
| 223 |
type=click.Choice(
|
| 224 |
-
["gpt-5.
|
| 225 |
case_sensitive=False,
|
| 226 |
),
|
| 227 |
help="Model to use for generation (auto-selected if not specified)",
|
|
|
|
| 143 |
generation_mode="subject",
|
| 144 |
source_text="",
|
| 145 |
url_input="",
|
| 146 |
+
model_name=config.get("model_choice", "gpt-5.2-auto"),
|
| 147 |
topic_number=config.get("topic_number", 3),
|
| 148 |
cards_per_topic=config.get("cards_per_topic", 5),
|
| 149 |
preference_prompt=config.get("preference_prompt", ""),
|
|
|
|
| 221 |
@click.option(
|
| 222 |
"--model",
|
| 223 |
type=click.Choice(
|
| 224 |
+
["gpt-5.2-auto", "gpt-5.2-instant", "gpt-5.2-thinking"],
|
| 225 |
case_sensitive=False,
|
| 226 |
),
|
| 227 |
help="Model to use for generation (auto-selected if not specified)",
|
ankigen_core/llm_interface.py
CHANGED
|
@@ -129,7 +129,7 @@ async def structured_agent_call(
|
|
| 129 |
|
| 130 |
Args:
|
| 131 |
openai_client: AsyncOpenAI client instance
|
| 132 |
-
model: Model name (e.g., "gpt-5.
|
| 133 |
instructions: System instructions for the agent
|
| 134 |
user_input: User prompt/input
|
| 135 |
output_type: Pydantic model class for structured output
|
|
@@ -155,10 +155,10 @@ async def structured_agent_call(
|
|
| 155 |
# 2. Set up the OpenAI client for agents SDK
|
| 156 |
set_default_openai_client(openai_client, use_for_tracing=False)
|
| 157 |
|
| 158 |
-
# 3. Build model settings with GPT-5.
|
| 159 |
model_settings_kwargs: dict = {"temperature": temperature}
|
| 160 |
|
| 161 |
-
# GPT-5.
|
| 162 |
if model.startswith("gpt-5") and "chat-latest" not in model:
|
| 163 |
from openai.types.shared import Reasoning
|
| 164 |
|
|
|
|
| 129 |
|
| 130 |
Args:
|
| 131 |
openai_client: AsyncOpenAI client instance
|
| 132 |
+
model: Model name (e.g., "gpt-5.2", "gpt-5.2-chat-latest")
|
| 133 |
instructions: System instructions for the agent
|
| 134 |
user_input: User prompt/input
|
| 135 |
output_type: Pydantic model class for structured output
|
|
|
|
| 155 |
# 2. Set up the OpenAI client for agents SDK
|
| 156 |
set_default_openai_client(openai_client, use_for_tracing=False)
|
| 157 |
|
| 158 |
+
# 3. Build model settings with GPT-5.x reasoning support
|
| 159 |
model_settings_kwargs: dict = {"temperature": temperature}
|
| 160 |
|
| 161 |
+
# GPT-5.x (not chat-latest) supports reasoning_effort
|
| 162 |
if model.startswith("gpt-5") and "chat-latest" not in model:
|
| 163 |
from openai.types.shared import Reasoning
|
| 164 |
|
app.py
CHANGED
|
@@ -65,8 +65,9 @@ except (AttributeError, ImportError):
|
|
| 65 |
# CSS for the interface (moved to module level for Gradio 6 compatibility)
|
| 66 |
custom_css = """
|
| 67 |
#footer {display:none !important}
|
|
|
|
| 68 |
.tall-dataframe {min-height: 500px !important}
|
| 69 |
-
.contain {max-width: 100% !important; margin: auto;}
|
| 70 |
.output-cards {border-radius: 8px; box-shadow: 0 4px 6px -1px rgba(0,0,0,0.1);}
|
| 71 |
.hint-text {font-size: 0.9em; color: #666; margin-top: 4px;}
|
| 72 |
.export-group > .gradio-group { margin-bottom: 0 !important; padding-bottom: 5px !important; }
|
|
@@ -158,170 +159,187 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 158 |
gr.Markdown("# 📚 AnkiGen - Anki Card Generator")
|
| 159 |
gr.Markdown("#### Generate Anki flashcards using AI.")
|
| 160 |
|
| 161 |
-
with gr.
|
| 162 |
-
with gr.
|
| 163 |
-
with gr.
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 178 |
auto_fill_btn = gr.Button(
|
| 179 |
"Auto-fill",
|
| 180 |
variant="secondary",
|
| 181 |
)
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
|
|
|
| 189 |
)
|
|
|
|
|
|
|
| 190 |
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
)
|
| 195 |
-
with library_accordion:
|
| 196 |
-
library_name_input = gr.Textbox(
|
| 197 |
-
label="Library Name",
|
| 198 |
-
placeholder="e.g., 'react', 'tensorflow', 'pandas'",
|
| 199 |
-
info="Fetch up-to-date documentation for this library",
|
| 200 |
-
)
|
| 201 |
-
library_topic_input = gr.Textbox(
|
| 202 |
-
label="Documentation Focus (optional)",
|
| 203 |
-
placeholder="e.g., 'hooks', 'data loading', 'transforms'",
|
| 204 |
-
info="Specific topic within the library to focus on",
|
| 205 |
-
)
|
| 206 |
-
with gr.Column(scale=1):
|
| 207 |
-
with gr.Accordion("Advanced Settings", open=False):
|
| 208 |
-
model_choices_ui = [
|
| 209 |
-
(m["label"], m["value"]) for m in AVAILABLE_MODELS
|
| 210 |
-
]
|
| 211 |
-
default_model_value = next(
|
| 212 |
-
(
|
| 213 |
-
m["value"]
|
| 214 |
-
for m in AVAILABLE_MODELS
|
| 215 |
-
if "nano" in m["value"].lower()
|
| 216 |
-
),
|
| 217 |
-
AVAILABLE_MODELS[0]["value"],
|
| 218 |
-
)
|
| 219 |
-
model_choice = gr.Dropdown(
|
| 220 |
-
choices=model_choices_ui,
|
| 221 |
-
value=default_model_value,
|
| 222 |
-
label="Model Selection",
|
| 223 |
-
info="Select AI model for generation",
|
| 224 |
-
allow_custom_value=True,
|
| 225 |
-
)
|
| 226 |
-
topic_number = gr.Slider(
|
| 227 |
-
label="Number of Topics",
|
| 228 |
-
minimum=2,
|
| 229 |
-
maximum=20,
|
| 230 |
-
step=1,
|
| 231 |
-
value=2,
|
| 232 |
-
)
|
| 233 |
-
cards_per_topic = gr.Slider(
|
| 234 |
-
label="Cards per Topic",
|
| 235 |
-
minimum=2,
|
| 236 |
-
maximum=30,
|
| 237 |
-
step=1,
|
| 238 |
-
value=3,
|
| 239 |
-
)
|
| 240 |
-
preference_prompt = gr.Textbox(
|
| 241 |
-
label="Learning Preferences",
|
| 242 |
-
placeholder="e.g., 'Beginner focus'",
|
| 243 |
-
lines=3,
|
| 244 |
-
)
|
| 245 |
-
generate_cloze_checkbox = gr.Checkbox(
|
| 246 |
-
label="Generate Cloze Cards (Experimental)",
|
| 247 |
-
value=False,
|
| 248 |
-
)
|
| 249 |
gr.Markdown(
|
| 250 |
-
"
|
| 251 |
)
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 266 |
)
|
| 267 |
-
output = gr.DataFrame(
|
| 268 |
-
value=example_data,
|
| 269 |
-
headers=[
|
| 270 |
-
"Index",
|
| 271 |
-
"Topic",
|
| 272 |
-
"Card_Type",
|
| 273 |
-
"Question",
|
| 274 |
-
"Answer",
|
| 275 |
-
"Explanation",
|
| 276 |
-
"Example",
|
| 277 |
-
"Prerequisites",
|
| 278 |
-
"Learning_Outcomes",
|
| 279 |
-
"Difficulty",
|
| 280 |
-
],
|
| 281 |
-
datatype=[
|
| 282 |
-
"number",
|
| 283 |
-
"str",
|
| 284 |
-
"str",
|
| 285 |
-
"str",
|
| 286 |
-
"str",
|
| 287 |
-
"str",
|
| 288 |
-
"str",
|
| 289 |
-
"str",
|
| 290 |
-
"str",
|
| 291 |
-
"str",
|
| 292 |
-
],
|
| 293 |
-
interactive=True,
|
| 294 |
-
elem_classes="tall-dataframe",
|
| 295 |
-
wrap=True,
|
| 296 |
-
column_widths=[
|
| 297 |
-
50,
|
| 298 |
-
100,
|
| 299 |
-
80,
|
| 300 |
-
200,
|
| 301 |
-
200,
|
| 302 |
-
250,
|
| 303 |
-
200,
|
| 304 |
-
150,
|
| 305 |
-
150,
|
| 306 |
-
100,
|
| 307 |
-
],
|
| 308 |
-
)
|
| 309 |
-
total_cards_html = gr.HTML(
|
| 310 |
-
value="<div><b>Total Cards Generated:</b> <span id='total-cards-count'>0</span></div>",
|
| 311 |
-
visible=False,
|
| 312 |
-
)
|
| 313 |
|
| 314 |
-
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
|
|
|
|
|
|
| 325 |
|
| 326 |
# --- Event Handlers --- (Updated to use functions from ankigen_core)
|
| 327 |
generation_mode.change(
|
|
@@ -339,6 +357,26 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 339 |
],
|
| 340 |
)
|
| 341 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 342 |
# Define an async wrapper for the orchestrate_card_generation
|
| 343 |
async def handle_generate_click(
|
| 344 |
api_key_input_val,
|
|
@@ -353,7 +391,7 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 353 |
library_topic_val,
|
| 354 |
progress=gr.Progress(track_tqdm=True),
|
| 355 |
):
|
| 356 |
-
|
| 357 |
client_manager,
|
| 358 |
response_cache,
|
| 359 |
api_key_input_val,
|
|
@@ -369,8 +407,47 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 369 |
library_name=library_name_val if library_name_val else None,
|
| 370 |
library_topic=library_topic_val if library_topic_val else None,
|
| 371 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 372 |
|
| 373 |
generate_button.click(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 374 |
fn=handle_generate_click,
|
| 375 |
inputs=[
|
| 376 |
api_key_input,
|
|
@@ -384,8 +461,12 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 384 |
library_name_input,
|
| 385 |
library_topic_input,
|
| 386 |
],
|
| 387 |
-
outputs=[output, total_cards_html, token_usage_html],
|
| 388 |
show_progress="full",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 389 |
)
|
| 390 |
|
| 391 |
# Define handler for CSV export (similar to APKG)
|
|
@@ -514,11 +595,11 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 514 |
"""Handle auto-fill button click to populate all settings"""
|
| 515 |
if not subject_text or not subject_text.strip():
|
| 516 |
gr.Warning("Please enter a subject first")
|
| 517 |
-
return [gr.update()] *
|
| 518 |
|
| 519 |
if not api_key:
|
| 520 |
gr.Warning("OpenAI API key is required for auto-configuration")
|
| 521 |
-
return [gr.update()] *
|
| 522 |
|
| 523 |
try:
|
| 524 |
progress(0, desc="Analyzing subject...")
|
|
@@ -535,7 +616,13 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 535 |
|
| 536 |
if not config:
|
| 537 |
gr.Warning("Could not generate configuration")
|
| 538 |
-
return [gr.update()] *
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 539 |
|
| 540 |
# Return updates for all relevant UI components
|
| 541 |
return (
|
|
@@ -545,10 +632,9 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 545 |
gr.update(
|
| 546 |
value=config.get("library_topic", "")
|
| 547 |
), # library_topic_input
|
| 548 |
-
gr.update(value=
|
| 549 |
-
gr.update(
|
| 550 |
-
|
| 551 |
-
), # cards_per_topic
|
| 552 |
gr.update(
|
| 553 |
value=config.get("preference_prompt", "")
|
| 554 |
), # preference_prompt
|
|
@@ -556,7 +642,7 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 556 |
value=config.get("generate_cloze_checkbox", False)
|
| 557 |
), # generate_cloze_checkbox
|
| 558 |
gr.update(
|
| 559 |
-
value=config.get("model_choice", "gpt-
|
| 560 |
), # model_choice
|
| 561 |
gr.update(
|
| 562 |
open=True
|
|
@@ -566,7 +652,7 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 566 |
except Exception as e:
|
| 567 |
logger.error(f"Auto-configuration failed: {e}", exc_info=True)
|
| 568 |
gr.Error(f"Auto-configuration failed: {str(e)}")
|
| 569 |
-
return [gr.update()] *
|
| 570 |
|
| 571 |
auto_fill_btn.click(
|
| 572 |
fn=handle_auto_fill_click,
|
|
@@ -576,6 +662,7 @@ def create_ankigen_interface(theme=None, css=None, js=None):
|
|
| 576 |
library_topic_input,
|
| 577 |
topic_number,
|
| 578 |
cards_per_topic,
|
|
|
|
| 579 |
preference_prompt,
|
| 580 |
generate_cloze_checkbox,
|
| 581 |
model_choice,
|
|
|
|
| 65 |
# CSS for the interface (moved to module level for Gradio 6 compatibility)
|
| 66 |
custom_css = """
|
| 67 |
#footer {display:none !important}
|
| 68 |
+
.gradio-container {max-width: 100% !important; padding: 0 24px;}
|
| 69 |
.tall-dataframe {min-height: 500px !important}
|
| 70 |
+
.contain {width: 100% !important; max-width: 100% !important; margin: 0 auto; box-sizing: border-box;}
|
| 71 |
.output-cards {border-radius: 8px; box-shadow: 0 4px 6px -1px rgba(0,0,0,0.1);}
|
| 72 |
.hint-text {font-size: 0.9em; color: #666; margin-top: 4px;}
|
| 73 |
.export-group > .gradio-group { margin-bottom: 0 !important; padding-bottom: 5px !important; }
|
|
|
|
| 159 |
gr.Markdown("# 📚 AnkiGen - Anki Card Generator")
|
| 160 |
gr.Markdown("#### Generate Anki flashcards using AI.")
|
| 161 |
|
| 162 |
+
with gr.Tabs(selected="setup") as main_tabs:
|
| 163 |
+
with gr.Tab("Setup", id="setup"):
|
| 164 |
+
with gr.Accordion("Configuration Settings", open=True):
|
| 165 |
+
with gr.Row():
|
| 166 |
+
with gr.Column(scale=1):
|
| 167 |
+
generation_mode = gr.Radio(
|
| 168 |
+
choices=[
|
| 169 |
+
("Single Subject", "subject"),
|
| 170 |
+
],
|
| 171 |
+
value="subject",
|
| 172 |
+
label="Generation Mode",
|
| 173 |
+
info="Choose how you want to generate content",
|
| 174 |
+
visible=False, # Hidden since only one mode exists
|
| 175 |
+
)
|
| 176 |
+
with gr.Group() as subject_mode:
|
| 177 |
+
subject = gr.Textbox(
|
| 178 |
+
label="Subject",
|
| 179 |
+
placeholder="e.g., 'Basic SQL Concepts'",
|
| 180 |
+
)
|
| 181 |
+
api_key_input = gr.Textbox(
|
| 182 |
+
label="OpenAI API Key",
|
| 183 |
+
type="password",
|
| 184 |
+
placeholder="Enter your OpenAI API key (sk-...)",
|
| 185 |
+
value=os.getenv("OPENAI_API_KEY", ""),
|
| 186 |
+
info="Your key is used solely for processing your requests.",
|
| 187 |
+
elem_id="api-key-textbox",
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
# Context7 Library Documentation
|
| 191 |
+
library_accordion = gr.Accordion(
|
| 192 |
+
"Library Documentation (optional)", open=True
|
| 193 |
+
)
|
| 194 |
+
with library_accordion:
|
| 195 |
+
library_name_input = gr.Textbox(
|
| 196 |
+
label="Library Name",
|
| 197 |
+
placeholder="e.g., 'react', 'tensorflow', 'pandas'",
|
| 198 |
+
info="Fetch up-to-date documentation for this library",
|
| 199 |
+
)
|
| 200 |
+
library_topic_input = gr.Textbox(
|
| 201 |
+
label="Documentation Focus (optional)",
|
| 202 |
+
placeholder="e.g., 'hooks', 'data loading', 'transforms'",
|
| 203 |
+
info="Specific topic within the library to focus on",
|
| 204 |
+
)
|
| 205 |
+
with gr.Column(scale=1):
|
| 206 |
+
with gr.Accordion("Advanced Settings", open=True):
|
| 207 |
+
model_choices_ui = [
|
| 208 |
+
(m["label"], m["value"])
|
| 209 |
+
for m in AVAILABLE_MODELS
|
| 210 |
+
]
|
| 211 |
+
default_model_value = next(
|
| 212 |
+
(
|
| 213 |
+
m["value"]
|
| 214 |
+
for m in AVAILABLE_MODELS
|
| 215 |
+
if m["value"] == "gpt-5.2-auto"
|
| 216 |
+
),
|
| 217 |
+
AVAILABLE_MODELS[0]["value"],
|
| 218 |
+
)
|
| 219 |
+
model_choice = gr.Dropdown(
|
| 220 |
+
choices=model_choices_ui,
|
| 221 |
+
value=default_model_value,
|
| 222 |
+
label="Model Selection",
|
| 223 |
+
info="Select AI model for generation",
|
| 224 |
+
allow_custom_value=True,
|
| 225 |
+
)
|
| 226 |
+
topic_number = gr.Slider(
|
| 227 |
+
label="Number of Topics",
|
| 228 |
+
minimum=2,
|
| 229 |
+
maximum=20,
|
| 230 |
+
step=1,
|
| 231 |
+
value=2,
|
| 232 |
+
)
|
| 233 |
+
cards_per_topic = gr.Slider(
|
| 234 |
+
label="Cards per Topic",
|
| 235 |
+
minimum=2,
|
| 236 |
+
maximum=30,
|
| 237 |
+
step=1,
|
| 238 |
+
value=3,
|
| 239 |
+
)
|
| 240 |
+
total_cards_preview = gr.Markdown(
|
| 241 |
+
f"**Total cards:** {2 * 3}"
|
| 242 |
+
)
|
| 243 |
+
preference_prompt = gr.Textbox(
|
| 244 |
+
label="Learning Preferences",
|
| 245 |
+
placeholder="e.g., 'Beginner focus'",
|
| 246 |
+
lines=3,
|
| 247 |
+
)
|
| 248 |
+
generate_cloze_checkbox = gr.Checkbox(
|
| 249 |
+
label="Generate Cloze Cards",
|
| 250 |
+
value=True,
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
with gr.Row():
|
| 254 |
auto_fill_btn = gr.Button(
|
| 255 |
"Auto-fill",
|
| 256 |
variant="secondary",
|
| 257 |
)
|
| 258 |
+
generate_button = gr.Button(
|
| 259 |
+
"Generate Cards", variant="primary"
|
| 260 |
+
)
|
| 261 |
+
status_markdown = gr.Markdown("")
|
| 262 |
+
log_output = gr.Textbox(
|
| 263 |
+
label="Live Logs",
|
| 264 |
+
lines=8,
|
| 265 |
+
interactive=False,
|
| 266 |
)
|
| 267 |
+
generation_active = gr.State(False)
|
| 268 |
+
log_timer = gr.Timer(2)
|
| 269 |
|
| 270 |
+
with gr.Tab("Results", id="results"):
|
| 271 |
+
with gr.Group() as cards_output:
|
| 272 |
+
gr.Markdown("### Generated Cards")
|
| 273 |
+
with gr.Accordion("Output Format", open=False):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 274 |
gr.Markdown(
|
| 275 |
+
"Cards: Index, Topic, Type, Q, A, Explanation, Example, Prerequisites, Outcomes, Difficulty. Export: CSV, .apkg",
|
| 276 |
)
|
| 277 |
+
with gr.Accordion("Example Card Format", open=False):
|
| 278 |
+
gr.Code(
|
| 279 |
+
label="Example Card",
|
| 280 |
+
value='{"front": ..., "back": ..., "metadata": ...}',
|
| 281 |
+
language="json",
|
| 282 |
+
)
|
| 283 |
+
output = gr.DataFrame(
|
| 284 |
+
value=example_data,
|
| 285 |
+
headers=[
|
| 286 |
+
"Index",
|
| 287 |
+
"Topic",
|
| 288 |
+
"Card_Type",
|
| 289 |
+
"Question",
|
| 290 |
+
"Answer",
|
| 291 |
+
"Explanation",
|
| 292 |
+
"Example",
|
| 293 |
+
"Prerequisites",
|
| 294 |
+
"Learning_Outcomes",
|
| 295 |
+
"Difficulty",
|
| 296 |
+
],
|
| 297 |
+
datatype=[
|
| 298 |
+
"number",
|
| 299 |
+
"str",
|
| 300 |
+
"str",
|
| 301 |
+
"str",
|
| 302 |
+
"str",
|
| 303 |
+
"str",
|
| 304 |
+
"str",
|
| 305 |
+
"str",
|
| 306 |
+
"str",
|
| 307 |
+
"str",
|
| 308 |
+
],
|
| 309 |
+
interactive=True,
|
| 310 |
+
elem_classes="tall-dataframe",
|
| 311 |
+
wrap=True,
|
| 312 |
+
column_widths=[
|
| 313 |
+
50,
|
| 314 |
+
100,
|
| 315 |
+
80,
|
| 316 |
+
200,
|
| 317 |
+
200,
|
| 318 |
+
250,
|
| 319 |
+
200,
|
| 320 |
+
150,
|
| 321 |
+
150,
|
| 322 |
+
100,
|
| 323 |
+
],
|
| 324 |
+
)
|
| 325 |
+
total_cards_html = gr.HTML(
|
| 326 |
+
value="<div><b>Total Cards Generated:</b> <span id='total-cards-count'>0</span></div>",
|
| 327 |
+
visible=False,
|
| 328 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 329 |
|
| 330 |
+
# Token usage display
|
| 331 |
+
token_usage_html = gr.HTML(
|
| 332 |
+
value="<div style='margin-top: 8px;'><b>Token Usage:</b> <span id='token-usage-display'>No usage data</span></div>",
|
| 333 |
+
visible=True,
|
| 334 |
+
)
|
| 335 |
|
| 336 |
+
# Export buttons
|
| 337 |
+
with gr.Row(elem_classes="export-group"):
|
| 338 |
+
export_csv_button = gr.Button("Export to CSV")
|
| 339 |
+
export_apkg_button = gr.Button("Export to .apkg")
|
| 340 |
+
download_file_output = gr.File(
|
| 341 |
+
label="Download Deck", visible=False
|
| 342 |
+
)
|
| 343 |
|
| 344 |
# --- Event Handlers --- (Updated to use functions from ankigen_core)
|
| 345 |
generation_mode.change(
|
|
|
|
| 357 |
],
|
| 358 |
)
|
| 359 |
|
| 360 |
+
def update_total_cards_preview(topics_value: int, cards_value: int) -> str:
    """Render the markdown total-cards preview for the current slider values.

    Args:
        topics_value: Number of topics selected (slider value; may arrive as str).
        cards_value: Cards per topic selected (slider value; may arrive as str).

    Returns:
        Markdown string showing topics * cards, or an em-dash placeholder when
        either value is not coercible to an integer.
    """
    try:
        total = int(topics_value) * int(cards_value)
    except (TypeError, ValueError):
        # Non-numeric slider state (e.g. cleared/None input): show a placeholder.
        return "**Total cards:** —"
    return f"**Total cards:** {total}"
|
| 368 |
+
|
| 369 |
+
topic_number.change(
|
| 370 |
+
fn=update_total_cards_preview,
|
| 371 |
+
inputs=[topic_number, cards_per_topic],
|
| 372 |
+
outputs=[total_cards_preview],
|
| 373 |
+
)
|
| 374 |
+
cards_per_topic.change(
|
| 375 |
+
fn=update_total_cards_preview,
|
| 376 |
+
inputs=[topic_number, cards_per_topic],
|
| 377 |
+
outputs=[total_cards_preview],
|
| 378 |
+
)
|
| 379 |
+
|
| 380 |
# Define an async wrapper for the orchestrate_card_generation
|
| 381 |
async def handle_generate_click(
|
| 382 |
api_key_input_val,
|
|
|
|
| 391 |
library_topic_val,
|
| 392 |
progress=gr.Progress(track_tqdm=True),
|
| 393 |
):
|
| 394 |
+
output_df, total_html, token_html = await orchestrate_card_generation(
|
| 395 |
client_manager,
|
| 396 |
response_cache,
|
| 397 |
api_key_input_val,
|
|
|
|
| 407 |
library_name=library_name_val if library_name_val else None,
|
| 408 |
library_topic=library_topic_val if library_topic_val else None,
|
| 409 |
)
|
| 410 |
+
return output_df, total_html, token_html, gr.Tabs(selected="results")
|
| 411 |
+
|
| 412 |
+
def refresh_logs(active: bool):
    """Timer callback: return fresh log text while generation is running.

    When no generation is active, return a no-op update so the log box
    is left untouched by the polling timer.
    """
    return get_recent_logs() if active else gr.update()
|
| 416 |
+
|
| 417 |
+
log_timer.tick(
|
| 418 |
+
fn=refresh_logs,
|
| 419 |
+
inputs=[generation_active],
|
| 420 |
+
outputs=[log_output],
|
| 421 |
+
)
|
| 422 |
+
|
| 423 |
+
def start_generation_ui():
    """Switch the UI into its 'generating' state.

    Returns updates for (status_markdown, generate_button, generation_active,
    log_output): show the progress banner, lock the generate button, mark
    generation active so the log timer polls, and seed the log box.
    """
    status_banner = gr.update(
        value="**Generating cards...** This can take a bit.",
        visible=True,
    )
    locked_button = gr.update(interactive=False)
    return status_banner, locked_button, True, get_recent_logs()
|
| 433 |
+
|
| 434 |
+
def finish_generation_ui():
    """Restore the UI after generation completes.

    Returns updates for (status_markdown, generate_button, generation_active):
    show the ready banner, re-enable the generate button, and stop log polling.
    """
    ready_banner = gr.update(value="**Ready.**", visible=True)
    enabled_button = gr.update(interactive=True)
    return ready_banner, enabled_button, False
|
| 440 |
|
| 441 |
generate_button.click(
|
| 442 |
+
fn=start_generation_ui,
|
| 443 |
+
inputs=[],
|
| 444 |
+
outputs=[
|
| 445 |
+
status_markdown,
|
| 446 |
+
generate_button,
|
| 447 |
+
generation_active,
|
| 448 |
+
log_output,
|
| 449 |
+
],
|
| 450 |
+
).then(
|
| 451 |
fn=handle_generate_click,
|
| 452 |
inputs=[
|
| 453 |
api_key_input,
|
|
|
|
| 461 |
library_name_input,
|
| 462 |
library_topic_input,
|
| 463 |
],
|
| 464 |
+
outputs=[output, total_cards_html, token_usage_html, main_tabs],
|
| 465 |
show_progress="full",
|
| 466 |
+
).then(
|
| 467 |
+
fn=finish_generation_ui,
|
| 468 |
+
inputs=[],
|
| 469 |
+
outputs=[status_markdown, generate_button, generation_active],
|
| 470 |
)
|
| 471 |
|
| 472 |
# Define handler for CSV export (similar to APKG)
|
|
|
|
| 595 |
"""Handle auto-fill button click to populate all settings"""
|
| 596 |
if not subject_text or not subject_text.strip():
|
| 597 |
gr.Warning("Please enter a subject first")
|
| 598 |
+
return [gr.update()] * 9 # Return no updates for all outputs
|
| 599 |
|
| 600 |
if not api_key:
|
| 601 |
gr.Warning("OpenAI API key is required for auto-configuration")
|
| 602 |
+
return [gr.update()] * 9
|
| 603 |
|
| 604 |
try:
|
| 605 |
progress(0, desc="Analyzing subject...")
|
|
|
|
| 616 |
|
| 617 |
if not config:
|
| 618 |
gr.Warning("Could not generate configuration")
|
| 619 |
+
return [gr.update()] * 9
|
| 620 |
+
|
| 621 |
+
topics_value = config.get("topic_number", 3)
|
| 622 |
+
cards_value = config.get("cards_per_topic", 5)
|
| 623 |
+
total_cards_text = (
|
| 624 |
+
f"**Total cards:** {int(topics_value) * int(cards_value)}"
|
| 625 |
+
)
|
| 626 |
|
| 627 |
# Return updates for all relevant UI components
|
| 628 |
return (
|
|
|
|
| 632 |
gr.update(
|
| 633 |
value=config.get("library_topic", "")
|
| 634 |
), # library_topic_input
|
| 635 |
+
gr.update(value=topics_value), # topic_number
|
| 636 |
+
gr.update(value=cards_value), # cards_per_topic
|
| 637 |
+
gr.update(value=total_cards_text), # total_cards_preview
|
|
|
|
| 638 |
gr.update(
|
| 639 |
value=config.get("preference_prompt", "")
|
| 640 |
), # preference_prompt
|
|
|
|
| 642 |
value=config.get("generate_cloze_checkbox", False)
|
| 643 |
), # generate_cloze_checkbox
|
| 644 |
gr.update(
|
| 645 |
+
value=config.get("model_choice", "gpt-5.2-auto")
|
| 646 |
), # model_choice
|
| 647 |
gr.update(
|
| 648 |
open=True
|
|
|
|
| 652 |
except Exception as e:
|
| 653 |
logger.error(f"Auto-configuration failed: {e}", exc_info=True)
|
| 654 |
gr.Error(f"Auto-configuration failed: {str(e)}")
|
| 655 |
+
return [gr.update()] * 9
|
| 656 |
|
| 657 |
auto_fill_btn.click(
|
| 658 |
fn=handle_auto_fill_click,
|
|
|
|
| 662 |
library_topic_input,
|
| 663 |
topic_number,
|
| 664 |
cards_per_topic,
|
| 665 |
+
total_cards_preview,
|
| 666 |
preference_prompt,
|
| 667 |
generate_cloze_checkbox,
|
| 668 |
model_choice,
|
requirements.txt
CHANGED
|
@@ -2,11 +2,11 @@
|
|
| 2 |
# uv pip compile pyproject.toml -o requirements.txt
|
| 3 |
aiofiles==24.1.0
|
| 4 |
# via gradio
|
| 5 |
-
annotated-doc==0.0.
|
| 6 |
# via fastapi
|
| 7 |
annotated-types==0.7.0
|
| 8 |
# via pydantic
|
| 9 |
-
anyio==4.
|
| 10 |
# via
|
| 11 |
# gradio
|
| 12 |
# httpx
|
|
@@ -14,14 +14,14 @@ anyio==4.9.0
|
|
| 14 |
# openai
|
| 15 |
# sse-starlette
|
| 16 |
# starlette
|
| 17 |
-
attrs==25.
|
| 18 |
# via
|
| 19 |
# cyclopts
|
| 20 |
# jsonschema
|
| 21 |
# referencing
|
| 22 |
-
authlib==1.6.
|
| 23 |
# via fastmcp
|
| 24 |
-
beartype==0.22.
|
| 25 |
# via
|
| 26 |
# py-key-value-aio
|
| 27 |
# py-key-value-shared
|
|
@@ -33,31 +33,32 @@ brotli==1.2.0
|
|
| 33 |
# gradio
|
| 34 |
cached-property==2.0.1
|
| 35 |
# via genanki
|
| 36 |
-
cachetools==6.2.
|
| 37 |
# via py-key-value-aio
|
| 38 |
-
certifi==2025.
|
| 39 |
# via
|
| 40 |
# httpcore
|
| 41 |
# httpx
|
| 42 |
# requests
|
| 43 |
cffi==2.0.0
|
| 44 |
# via cryptography
|
| 45 |
-
charset-normalizer==3.4.
|
| 46 |
# via requests
|
| 47 |
chevron==0.14.0
|
| 48 |
# via genanki
|
| 49 |
-
click==8.
|
| 50 |
# via
|
| 51 |
# typer
|
|
|
|
| 52 |
# uvicorn
|
| 53 |
colorama==0.4.6
|
| 54 |
# via griffe
|
| 55 |
-
cryptography==46.0.
|
| 56 |
# via
|
| 57 |
# authlib
|
| 58 |
# pyjwt
|
| 59 |
# secretstorage
|
| 60 |
-
cyclopts==
|
| 61 |
# via fastmcp
|
| 62 |
diskcache==5.6.3
|
| 63 |
# via py-key-value-aio
|
|
@@ -67,33 +68,33 @@ dnspython==2.8.0
|
|
| 67 |
# via email-validator
|
| 68 |
docstring-parser==0.17.0
|
| 69 |
# via cyclopts
|
| 70 |
-
docutils==0.22.
|
| 71 |
# via rich-rst
|
| 72 |
email-validator==2.3.0
|
| 73 |
# via pydantic
|
| 74 |
-
exceptiongroup==1.3.
|
| 75 |
# via fastmcp
|
| 76 |
-
fastapi==0.
|
| 77 |
# via gradio
|
| 78 |
fastmcp==2.13.0
|
| 79 |
# via ankigen (pyproject.toml)
|
| 80 |
-
ffmpy==0.
|
| 81 |
# via gradio
|
| 82 |
-
filelock==3.
|
| 83 |
# via huggingface-hub
|
| 84 |
-
frozendict==2.4.
|
| 85 |
# via genanki
|
| 86 |
-
fsspec==2025.
|
| 87 |
# via
|
| 88 |
# gradio-client
|
| 89 |
# huggingface-hub
|
| 90 |
genanki==0.13.1
|
| 91 |
# via ankigen (pyproject.toml)
|
| 92 |
-
gradio==6.
|
| 93 |
# via ankigen (pyproject.toml)
|
| 94 |
gradio-client==2.0.1
|
| 95 |
# via gradio
|
| 96 |
-
griffe==1.
|
| 97 |
# via openai-agents
|
| 98 |
groovy==0.1.2
|
| 99 |
# via gradio
|
|
@@ -101,7 +102,7 @@ h11==0.16.0
|
|
| 101 |
# via
|
| 102 |
# httpcore
|
| 103 |
# uvicorn
|
| 104 |
-
hf-xet==1.
|
| 105 |
# via huggingface-hub
|
| 106 |
httpcore==1.0.9
|
| 107 |
# via httpx
|
|
@@ -110,16 +111,17 @@ httpx==0.28.1
|
|
| 110 |
# fastmcp
|
| 111 |
# gradio
|
| 112 |
# gradio-client
|
|
|
|
| 113 |
# mcp
|
| 114 |
# openai
|
| 115 |
# safehttpx
|
| 116 |
-
httpx-sse==0.4.
|
| 117 |
# via mcp
|
| 118 |
-
huggingface-hub==
|
| 119 |
# via
|
| 120 |
# gradio
|
| 121 |
# gradio-client
|
| 122 |
-
idna==3.
|
| 123 |
# via
|
| 124 |
# anyio
|
| 125 |
# email-validator
|
|
@@ -139,9 +141,9 @@ jeepney==0.9.0
|
|
| 139 |
# secretstorage
|
| 140 |
jinja2==3.1.6
|
| 141 |
# via gradio
|
| 142 |
-
jiter==0.
|
| 143 |
# via openai
|
| 144 |
-
jsonschema==4.
|
| 145 |
# via
|
| 146 |
# mcp
|
| 147 |
# openapi-core
|
|
@@ -151,24 +153,24 @@ jsonschema-path==0.3.4
|
|
| 151 |
# via
|
| 152 |
# openapi-core
|
| 153 |
# openapi-spec-validator
|
| 154 |
-
jsonschema-specifications==2025.
|
| 155 |
# via
|
| 156 |
# jsonschema
|
| 157 |
# openapi-schema-validator
|
| 158 |
-
keyring==25.
|
| 159 |
# via py-key-value-aio
|
| 160 |
lazy-object-proxy==1.12.0
|
| 161 |
# via openapi-spec-validator
|
| 162 |
lxml==6.0.2
|
| 163 |
# via ankigen (pyproject.toml)
|
| 164 |
-
markdown-it-py==
|
| 165 |
# via rich
|
| 166 |
-
markupsafe==3.0.
|
| 167 |
# via
|
| 168 |
# gradio
|
| 169 |
# jinja2
|
| 170 |
# werkzeug
|
| 171 |
-
mcp==1.
|
| 172 |
# via
|
| 173 |
# fastmcp
|
| 174 |
# openai-agents
|
|
@@ -179,17 +181,17 @@ more-itertools==10.8.0
|
|
| 179 |
# jaraco-classes
|
| 180 |
# jaraco-functools
|
| 181 |
# openapi-core
|
| 182 |
-
numpy==2.3.
|
| 183 |
# via
|
| 184 |
# gradio
|
| 185 |
# pandas
|
| 186 |
-
openai==2.
|
| 187 |
# via
|
| 188 |
# ankigen (pyproject.toml)
|
| 189 |
# openai-agents
|
| 190 |
-
openai-agents==0.6.
|
| 191 |
# via ankigen (pyproject.toml)
|
| 192 |
-
openapi-core==0.
|
| 193 |
# via fastmcp
|
| 194 |
openapi-pydantic==0.5.1
|
| 195 |
# via fastmcp
|
|
@@ -199,26 +201,24 @@ openapi-schema-validator==0.6.3
|
|
| 199 |
# openapi-spec-validator
|
| 200 |
openapi-spec-validator==0.7.2
|
| 201 |
# via openapi-core
|
| 202 |
-
orjson==3.
|
| 203 |
# via gradio
|
| 204 |
packaging==25.0
|
| 205 |
# via
|
| 206 |
# gradio
|
| 207 |
# gradio-client
|
| 208 |
# huggingface-hub
|
| 209 |
-
pandas==2.3.
|
| 210 |
# via
|
| 211 |
# ankigen (pyproject.toml)
|
| 212 |
# gradio
|
| 213 |
-
parse==1.20.2
|
| 214 |
-
# via openapi-core
|
| 215 |
pathable==0.4.4
|
| 216 |
# via jsonschema-path
|
| 217 |
pathvalidate==3.3.1
|
| 218 |
# via py-key-value-aio
|
| 219 |
-
pillow==
|
| 220 |
# via gradio
|
| 221 |
-
platformdirs==4.5.
|
| 222 |
# via fastmcp
|
| 223 |
py-key-value-aio==0.2.8
|
| 224 |
# via fastmcp
|
|
@@ -239,7 +239,7 @@ pydantic==2.12.4
|
|
| 239 |
# pydantic-settings
|
| 240 |
pydantic-core==2.41.5
|
| 241 |
# via pydantic
|
| 242 |
-
pydantic-settings==2.
|
| 243 |
# via mcp
|
| 244 |
pydub==0.25.1
|
| 245 |
# via gradio
|
|
@@ -247,21 +247,21 @@ pygments==2.19.2
|
|
| 247 |
# via rich
|
| 248 |
pyjwt==2.10.1
|
| 249 |
# via mcp
|
| 250 |
-
pyperclip==1.
|
| 251 |
# via fastmcp
|
| 252 |
python-dateutil==2.9.0.post0
|
| 253 |
# via pandas
|
| 254 |
-
python-dotenv==1.
|
| 255 |
# via
|
| 256 |
# fastmcp
|
| 257 |
# pydantic-settings
|
| 258 |
-
python-multipart==0.0.
|
| 259 |
# via
|
| 260 |
# gradio
|
| 261 |
# mcp
|
| 262 |
pytz==2025.2
|
| 263 |
# via pandas
|
| 264 |
-
pyyaml==6.0.
|
| 265 |
# via
|
| 266 |
# genanki
|
| 267 |
# gradio
|
|
@@ -272,57 +272,57 @@ referencing==0.36.2
|
|
| 272 |
# jsonschema
|
| 273 |
# jsonschema-path
|
| 274 |
# jsonschema-specifications
|
| 275 |
-
regex==
|
| 276 |
# via tiktoken
|
| 277 |
-
requests==2.32.
|
| 278 |
# via
|
| 279 |
-
# huggingface-hub
|
| 280 |
# jsonschema-path
|
| 281 |
# openai-agents
|
| 282 |
# tiktoken
|
| 283 |
rfc3339-validator==0.1.4
|
| 284 |
# via openapi-schema-validator
|
| 285 |
-
rich==14.
|
| 286 |
# via
|
| 287 |
# cyclopts
|
| 288 |
# fastmcp
|
| 289 |
# rich-rst
|
| 290 |
# typer
|
| 291 |
-
rich-rst==1.3.
|
| 292 |
# via cyclopts
|
| 293 |
-
rpds-py==0.
|
| 294 |
# via
|
| 295 |
# jsonschema
|
| 296 |
# referencing
|
| 297 |
safehttpx==0.1.7
|
| 298 |
# via gradio
|
| 299 |
-
secretstorage==3.
|
| 300 |
# via keyring
|
| 301 |
semantic-version==2.10.0
|
| 302 |
# via gradio
|
| 303 |
shellingham==1.5.4
|
| 304 |
-
# via
|
|
|
|
|
|
|
| 305 |
six==1.17.0
|
| 306 |
# via
|
| 307 |
# python-dateutil
|
| 308 |
# rfc3339-validator
|
| 309 |
sniffio==1.3.1
|
| 310 |
-
# via
|
| 311 |
-
|
| 312 |
-
# openai
|
| 313 |
-
soupsieve==2.7
|
| 314 |
# via beautifulsoup4
|
| 315 |
-
sse-starlette==
|
| 316 |
# via mcp
|
| 317 |
-
starlette==0.
|
| 318 |
# via
|
| 319 |
# ankigen (pyproject.toml)
|
| 320 |
# fastapi
|
| 321 |
# gradio
|
| 322 |
# mcp
|
|
|
|
| 323 |
tenacity==9.1.2
|
| 324 |
# via ankigen (pyproject.toml)
|
| 325 |
-
tiktoken==0.
|
| 326 |
# via ankigen (pyproject.toml)
|
| 327 |
tomlkit==0.13.3
|
| 328 |
# via gradio
|
|
@@ -330,9 +330,11 @@ tqdm==4.67.1
|
|
| 330 |
# via
|
| 331 |
# huggingface-hub
|
| 332 |
# openai
|
| 333 |
-
typer==0.
|
| 334 |
# via gradio
|
| 335 |
-
|
|
|
|
|
|
|
| 336 |
# via openai-agents
|
| 337 |
typing-extensions==4.15.0
|
| 338 |
# via
|
|
@@ -343,6 +345,7 @@ typing-extensions==4.15.0
|
|
| 343 |
# gradio
|
| 344 |
# gradio-client
|
| 345 |
# huggingface-hub
|
|
|
|
| 346 |
# openai
|
| 347 |
# openai-agents
|
| 348 |
# openapi-core
|
|
@@ -352,22 +355,24 @@ typing-extensions==4.15.0
|
|
| 352 |
# referencing
|
| 353 |
# starlette
|
| 354 |
# typer
|
|
|
|
| 355 |
# typing-inspection
|
| 356 |
typing-inspection==0.4.2
|
| 357 |
# via
|
|
|
|
| 358 |
# pydantic
|
| 359 |
# pydantic-settings
|
| 360 |
-
tzdata==2025.
|
| 361 |
# via pandas
|
| 362 |
-
urllib3==2.
|
| 363 |
# via
|
| 364 |
# requests
|
| 365 |
# types-requests
|
| 366 |
-
uvicorn==0.
|
| 367 |
# via
|
| 368 |
# gradio
|
| 369 |
# mcp
|
| 370 |
websockets==15.0.1
|
| 371 |
# via fastmcp
|
| 372 |
-
werkzeug==3.1.
|
| 373 |
# via openapi-core
|
|
|
|
| 2 |
# uv pip compile pyproject.toml -o requirements.txt
|
| 3 |
aiofiles==24.1.0
|
| 4 |
# via gradio
|
| 5 |
+
annotated-doc==0.0.4
|
| 6 |
# via fastapi
|
| 7 |
annotated-types==0.7.0
|
| 8 |
# via pydantic
|
| 9 |
+
anyio==4.12.0
|
| 10 |
# via
|
| 11 |
# gradio
|
| 12 |
# httpx
|
|
|
|
| 14 |
# openai
|
| 15 |
# sse-starlette
|
| 16 |
# starlette
|
| 17 |
+
attrs==25.4.0
|
| 18 |
# via
|
| 19 |
# cyclopts
|
| 20 |
# jsonschema
|
| 21 |
# referencing
|
| 22 |
+
authlib==1.6.6
|
| 23 |
# via fastmcp
|
| 24 |
+
beartype==0.22.9
|
| 25 |
# via
|
| 26 |
# py-key-value-aio
|
| 27 |
# py-key-value-shared
|
|
|
|
| 33 |
# gradio
|
| 34 |
cached-property==2.0.1
|
| 35 |
# via genanki
|
| 36 |
+
cachetools==6.2.4
|
| 37 |
# via py-key-value-aio
|
| 38 |
+
certifi==2025.11.12
|
| 39 |
# via
|
| 40 |
# httpcore
|
| 41 |
# httpx
|
| 42 |
# requests
|
| 43 |
cffi==2.0.0
|
| 44 |
# via cryptography
|
| 45 |
+
charset-normalizer==3.4.4
|
| 46 |
# via requests
|
| 47 |
chevron==0.14.0
|
| 48 |
# via genanki
|
| 49 |
+
click==8.3.1
|
| 50 |
# via
|
| 51 |
# typer
|
| 52 |
+
# typer-slim
|
| 53 |
# uvicorn
|
| 54 |
colorama==0.4.6
|
| 55 |
# via griffe
|
| 56 |
+
cryptography==46.0.3
|
| 57 |
# via
|
| 58 |
# authlib
|
| 59 |
# pyjwt
|
| 60 |
# secretstorage
|
| 61 |
+
cyclopts==4.4.0
|
| 62 |
# via fastmcp
|
| 63 |
diskcache==5.6.3
|
| 64 |
# via py-key-value-aio
|
|
|
|
| 68 |
# via email-validator
|
| 69 |
docstring-parser==0.17.0
|
| 70 |
# via cyclopts
|
| 71 |
+
docutils==0.22.4
|
| 72 |
# via rich-rst
|
| 73 |
email-validator==2.3.0
|
| 74 |
# via pydantic
|
| 75 |
+
exceptiongroup==1.3.1
|
| 76 |
# via fastmcp
|
| 77 |
+
fastapi==0.125.0
|
| 78 |
# via gradio
|
| 79 |
fastmcp==2.13.0
|
| 80 |
# via ankigen (pyproject.toml)
|
| 81 |
+
ffmpy==1.0.0
|
| 82 |
# via gradio
|
| 83 |
+
filelock==3.20.1
|
| 84 |
# via huggingface-hub
|
| 85 |
+
frozendict==2.4.7
|
| 86 |
# via genanki
|
| 87 |
+
fsspec==2025.12.0
|
| 88 |
# via
|
| 89 |
# gradio-client
|
| 90 |
# huggingface-hub
|
| 91 |
genanki==0.13.1
|
| 92 |
# via ankigen (pyproject.toml)
|
| 93 |
+
gradio==6.1.0
|
| 94 |
# via ankigen (pyproject.toml)
|
| 95 |
gradio-client==2.0.1
|
| 96 |
# via gradio
|
| 97 |
+
griffe==1.15.0
|
| 98 |
# via openai-agents
|
| 99 |
groovy==0.1.2
|
| 100 |
# via gradio
|
|
|
|
| 102 |
# via
|
| 103 |
# httpcore
|
| 104 |
# uvicorn
|
| 105 |
+
hf-xet==1.2.0
|
| 106 |
# via huggingface-hub
|
| 107 |
httpcore==1.0.9
|
| 108 |
# via httpx
|
|
|
|
| 111 |
# fastmcp
|
| 112 |
# gradio
|
| 113 |
# gradio-client
|
| 114 |
+
# huggingface-hub
|
| 115 |
# mcp
|
| 116 |
# openai
|
| 117 |
# safehttpx
|
| 118 |
+
httpx-sse==0.4.3
|
| 119 |
# via mcp
|
| 120 |
+
huggingface-hub==1.2.3
|
| 121 |
# via
|
| 122 |
# gradio
|
| 123 |
# gradio-client
|
| 124 |
+
idna==3.11
|
| 125 |
# via
|
| 126 |
# anyio
|
| 127 |
# email-validator
|
|
|
|
| 141 |
# secretstorage
|
| 142 |
jinja2==3.1.6
|
| 143 |
# via gradio
|
| 144 |
+
jiter==0.12.0
|
| 145 |
# via openai
|
| 146 |
+
jsonschema==4.25.1
|
| 147 |
# via
|
| 148 |
# mcp
|
| 149 |
# openapi-core
|
|
|
|
| 153 |
# via
|
| 154 |
# openapi-core
|
| 155 |
# openapi-spec-validator
|
| 156 |
+
jsonschema-specifications==2025.9.1
|
| 157 |
# via
|
| 158 |
# jsonschema
|
| 159 |
# openapi-schema-validator
|
| 160 |
+
keyring==25.7.0
|
| 161 |
# via py-key-value-aio
|
| 162 |
lazy-object-proxy==1.12.0
|
| 163 |
# via openapi-spec-validator
|
| 164 |
lxml==6.0.2
|
| 165 |
# via ankigen (pyproject.toml)
|
| 166 |
+
markdown-it-py==4.0.0
|
| 167 |
# via rich
|
| 168 |
+
markupsafe==3.0.3
|
| 169 |
# via
|
| 170 |
# gradio
|
| 171 |
# jinja2
|
| 172 |
# werkzeug
|
| 173 |
+
mcp==1.24.0
|
| 174 |
# via
|
| 175 |
# fastmcp
|
| 176 |
# openai-agents
|
|
|
|
| 181 |
# jaraco-classes
|
| 182 |
# jaraco-functools
|
| 183 |
# openapi-core
|
| 184 |
+
numpy==2.3.5
|
| 185 |
# via
|
| 186 |
# gradio
|
| 187 |
# pandas
|
| 188 |
+
openai==2.14.0
|
| 189 |
# via
|
| 190 |
# ankigen (pyproject.toml)
|
| 191 |
# openai-agents
|
| 192 |
+
openai-agents==0.6.4
|
| 193 |
# via ankigen (pyproject.toml)
|
| 194 |
+
openapi-core==0.21.0
|
| 195 |
# via fastmcp
|
| 196 |
openapi-pydantic==0.5.1
|
| 197 |
# via fastmcp
|
|
|
|
| 201 |
# openapi-spec-validator
|
| 202 |
openapi-spec-validator==0.7.2
|
| 203 |
# via openapi-core
|
| 204 |
+
orjson==3.11.5
|
| 205 |
# via gradio
|
| 206 |
packaging==25.0
|
| 207 |
# via
|
| 208 |
# gradio
|
| 209 |
# gradio-client
|
| 210 |
# huggingface-hub
|
| 211 |
+
pandas==2.3.3
|
| 212 |
# via
|
| 213 |
# ankigen (pyproject.toml)
|
| 214 |
# gradio
|
|
|
|
|
|
|
| 215 |
pathable==0.4.4
|
| 216 |
# via jsonschema-path
|
| 217 |
pathvalidate==3.3.1
|
| 218 |
# via py-key-value-aio
|
| 219 |
+
pillow==12.0.0
|
| 220 |
# via gradio
|
| 221 |
+
platformdirs==4.5.1
|
| 222 |
# via fastmcp
|
| 223 |
py-key-value-aio==0.2.8
|
| 224 |
# via fastmcp
|
|
|
|
| 239 |
# pydantic-settings
|
| 240 |
pydantic-core==2.41.5
|
| 241 |
# via pydantic
|
| 242 |
+
pydantic-settings==2.12.0
|
| 243 |
# via mcp
|
| 244 |
pydub==0.25.1
|
| 245 |
# via gradio
|
|
|
|
| 247 |
# via rich
|
| 248 |
pyjwt==2.10.1
|
| 249 |
# via mcp
|
| 250 |
+
pyperclip==1.11.0
|
| 251 |
# via fastmcp
|
| 252 |
python-dateutil==2.9.0.post0
|
| 253 |
# via pandas
|
| 254 |
+
python-dotenv==1.2.1
|
| 255 |
# via
|
| 256 |
# fastmcp
|
| 257 |
# pydantic-settings
|
| 258 |
+
python-multipart==0.0.21
|
| 259 |
# via
|
| 260 |
# gradio
|
| 261 |
# mcp
|
| 262 |
pytz==2025.2
|
| 263 |
# via pandas
|
| 264 |
+
pyyaml==6.0.3
|
| 265 |
# via
|
| 266 |
# genanki
|
| 267 |
# gradio
|
|
|
|
| 272 |
# jsonschema
|
| 273 |
# jsonschema-path
|
| 274 |
# jsonschema-specifications
|
| 275 |
+
regex==2025.11.3
|
| 276 |
# via tiktoken
|
| 277 |
+
requests==2.32.5
|
| 278 |
# via
|
|
|
|
| 279 |
# jsonschema-path
|
| 280 |
# openai-agents
|
| 281 |
# tiktoken
|
| 282 |
rfc3339-validator==0.1.4
|
| 283 |
# via openapi-schema-validator
|
| 284 |
+
rich==14.2.0
|
| 285 |
# via
|
| 286 |
# cyclopts
|
| 287 |
# fastmcp
|
| 288 |
# rich-rst
|
| 289 |
# typer
|
| 290 |
+
rich-rst==1.3.2
|
| 291 |
# via cyclopts
|
| 292 |
+
rpds-py==0.30.0
|
| 293 |
# via
|
| 294 |
# jsonschema
|
| 295 |
# referencing
|
| 296 |
safehttpx==0.1.7
|
| 297 |
# via gradio
|
| 298 |
+
secretstorage==3.5.0
|
| 299 |
# via keyring
|
| 300 |
semantic-version==2.10.0
|
| 301 |
# via gradio
|
| 302 |
shellingham==1.5.4
|
| 303 |
+
# via
|
| 304 |
+
# huggingface-hub
|
| 305 |
+
# typer
|
| 306 |
six==1.17.0
|
| 307 |
# via
|
| 308 |
# python-dateutil
|
| 309 |
# rfc3339-validator
|
| 310 |
sniffio==1.3.1
|
| 311 |
+
# via openai
|
| 312 |
+
soupsieve==2.8.1
|
|
|
|
|
|
|
| 313 |
# via beautifulsoup4
|
| 314 |
+
sse-starlette==3.0.4
|
| 315 |
# via mcp
|
| 316 |
+
starlette==0.50.0
|
| 317 |
# via
|
| 318 |
# ankigen (pyproject.toml)
|
| 319 |
# fastapi
|
| 320 |
# gradio
|
| 321 |
# mcp
|
| 322 |
+
# sse-starlette
|
| 323 |
tenacity==9.1.2
|
| 324 |
# via ankigen (pyproject.toml)
|
| 325 |
+
tiktoken==0.12.0
|
| 326 |
# via ankigen (pyproject.toml)
|
| 327 |
tomlkit==0.13.3
|
| 328 |
# via gradio
|
|
|
|
| 330 |
# via
|
| 331 |
# huggingface-hub
|
| 332 |
# openai
|
| 333 |
+
typer==0.20.0
|
| 334 |
# via gradio
|
| 335 |
+
typer-slim==0.20.0
|
| 336 |
+
# via huggingface-hub
|
| 337 |
+
types-requests==2.32.4.20250913
|
| 338 |
# via openai-agents
|
| 339 |
typing-extensions==4.15.0
|
| 340 |
# via
|
|
|
|
| 345 |
# gradio
|
| 346 |
# gradio-client
|
| 347 |
# huggingface-hub
|
| 348 |
+
# mcp
|
| 349 |
# openai
|
| 350 |
# openai-agents
|
| 351 |
# openapi-core
|
|
|
|
| 355 |
# referencing
|
| 356 |
# starlette
|
| 357 |
# typer
|
| 358 |
+
# typer-slim
|
| 359 |
# typing-inspection
|
| 360 |
typing-inspection==0.4.2
|
| 361 |
# via
|
| 362 |
+
# mcp
|
| 363 |
# pydantic
|
| 364 |
# pydantic-settings
|
| 365 |
+
tzdata==2025.3
|
| 366 |
# via pandas
|
| 367 |
+
urllib3==2.6.2
|
| 368 |
# via
|
| 369 |
# requests
|
| 370 |
# types-requests
|
| 371 |
+
uvicorn==0.38.0
|
| 372 |
# via
|
| 373 |
# gradio
|
| 374 |
# mcp
|
| 375 |
websockets==15.0.1
|
| 376 |
# via fastmcp
|
| 377 |
+
werkzeug==3.1.4
|
| 378 |
# via openapi-core
|