# promptforge/backend/instruction_store.py
# Upload note (Really-amin): PromptForge v1.0 — structured prompt generator
# for Google AI Studio (commit ea65915, verified).
"""
PromptForge v4.0 β€” Instruction Settings store.
Upgrades: duplicate setting, export-all-as-JSON, bulk-delete,
favorite toggle, and richer seeded defaults.
"""
from __future__ import annotations
import json, os, logging, uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
from schemas import (
InstructionSettings, InstructionSettingsCreate, InstructionSettingsUpdate,
AIProvider, OutputFormat, PersonaType, StyleType,
)
# Module-scoped logger for this store.
logger = logging.getLogger("promptforge.instruction_store")
# In-memory primary store: settings_id -> InstructionSettings.
# NOTE(review): plain dict, no locking — presumably single-process/single-writer
# usage; confirm before serving from multiple workers.
_DB: Dict[str, InstructionSettings] = {}
# Directory holding the JSON snapshot; overridable via the LOG_DIR env var.
_LOG_DIR = Path(os.getenv("LOG_DIR", "logs"))
_LOG_DIR.mkdir(parents=True, exist_ok=True)  # created eagerly at import time
# Snapshot file rewritten on every mutation (see _persist / load_from_disk).
_PERSIST_FILE = _LOG_DIR / "instruction_settings.json"
# ── CRUD ─────────────────────────────────────────────────────────────────────
def create(data: InstructionSettingsCreate) -> InstructionSettings:
    """Store a new InstructionSettings record and persist the store.

    Assigns a fresh UUID, stamps identical created/updated timestamps,
    and starts the record at version 1 with a zero use count.

    Returns the fully-populated record.
    """
    stamp = datetime.utcnow()
    new_id = str(uuid.uuid4())
    record = InstructionSettings(
        settings_id=new_id,
        created_at=stamp,
        updated_at=stamp,
        use_count=0,
        version=1,
        **data.model_dump(),
    )
    _DB[new_id] = record
    _persist()
    logger.info("CREATED setting | id=%s title=%r", new_id, record.title)
    return record
def get(settings_id: str) -> Optional[InstructionSettings]:
    """Look up a setting by id; return None when it does not exist."""
    try:
        return _DB[settings_id]
    except KeyError:
        return None
def list_all(
    tag: Optional[str] = None,
    favorites_only: bool = False,
    search_q: Optional[str] = None,
) -> List[InstructionSettings]:
    """Return settings ordered favorites-first, then most recently updated.

    Optional filters: exact tag membership, favorites only, and a
    case-insensitive substring match over title + instruction + description.
    """
    def _order_key(s: InstructionSettings):
        # False (favorite) sorts before True; negated timestamp puts newest first.
        return (not s.is_favorite, -s.updated_at.timestamp())

    results = sorted(_DB.values(), key=_order_key)
    if tag:
        results = [s for s in results if tag in s.tags]
    if favorites_only:
        results = [s for s in results if s.is_favorite]
    if search_q:
        needle = search_q.lower()
        results = [
            s for s in results
            if needle in (s.title + s.instruction + (s.description or "")).lower()
        ]
    return results
def update(settings_id: str, data: InstructionSettingsUpdate) -> Optional[InstructionSettings]:
    """Apply a partial update to a setting.

    Only fields the caller explicitly provided are applied
    (``exclude_unset=True``).  The previous ``v is not None`` filter over a
    full ``model_dump()`` silently dropped explicit ``None`` values, making
    nullable fields (e.g. ``description``) impossible to clear; with
    ``exclude_unset`` an explicit ``None`` now clears the field while
    omitted fields are still left untouched.

    Bumps ``version`` and refreshes ``updated_at``.

    Returns the updated record, or ``None`` when ``settings_id`` is unknown.
    """
    existing = _DB.get(settings_id)
    if not existing:
        return None
    # Standard Pydantic v2 PATCH pattern: only explicitly-set fields survive.
    patch = data.model_dump(exclude_unset=True)
    patch["updated_at"] = datetime.utcnow()
    patch["version"] = existing.version + 1
    updated = existing.model_copy(update=patch)
    _DB[settings_id] = updated
    _persist()
    logger.info("UPDATED setting | id=%s v%d", settings_id, updated.version)
    return updated
def delete(settings_id: str) -> bool:
    """Remove one setting; return True when something was actually deleted."""
    removed = _DB.pop(settings_id, None)
    if removed is None:
        return False
    _persist()
    logger.info("DELETED setting | id=%s", settings_id)
    return True
def bulk_delete(settings_ids: List[str]) -> int:
    """Delete every listed id that exists; return how many were removed.

    Persists at most once, and only if something was deleted.
    """
    removed_count = sum(1 for sid in settings_ids if _DB.pop(sid, None) is not None)
    if removed_count:
        _persist()
    return removed_count
def duplicate(settings_id: str) -> Optional[InstructionSettings]:
    """Create a copy of an existing setting with '(copy)' suffix.

    Returns the newly created copy (fresh id/timestamps via create()),
    or None when the source id is unknown.  The copy is never favorited.
    """
    original = _DB.get(settings_id)
    if not original:
        return None
    # Only forward fields the Create schema declares: the full record also
    # carries settings_id / created_at / updated_at / version / use_count,
    # which would make validation fail if the schema forbids extra keys.
    allowed = set(InstructionSettingsCreate.model_fields)
    payload = {k: v for k, v in original.model_dump().items() if k in allowed}
    payload["title"] = f"{original.title} (copy)"
    payload["is_favorite"] = False
    return create(InstructionSettingsCreate(**payload))
def toggle_favorite(settings_id: str) -> Optional[bool]:
    """Flip a setting's favorite flag.

    Returns the new flag value, or None when the id is unknown.
    """
    current = _DB.get(settings_id)
    if current is None:
        return None
    flipped = not current.is_favorite
    _DB[settings_id] = current.model_copy(
        update={"is_favorite": flipped, "updated_at": datetime.utcnow()}
    )
    _persist()
    return flipped
def increment_use_count(settings_id: str) -> None:
    """Bump use_count by one and touch updated_at; no-op for unknown ids."""
    record = _DB.get(settings_id)
    if record is None:
        return
    _DB[settings_id] = record.model_copy(
        update={"use_count": record.use_count + 1, "updated_at": datetime.utcnow()}
    )
    _persist()
def export_all() -> List[dict]:
    """Return all settings as a list of dicts (for bulk JSON export).

    Records are ordered oldest-first by creation time.
    """
    ordered = sorted(_DB.values(), key=lambda record: record.created_at)
    return [record.model_dump(mode="json") for record in ordered]
def get_all_tags() -> List[str]:
    """Return every distinct tag across all settings, alphabetically sorted."""
    unique_tags: set = set()
    for setting in _DB.values():
        unique_tags.update(setting.tags)
    return sorted(unique_tags)
# ── Persistence ───────────────────────────────────────────────────────────────
def _persist() -> None:
    """Write the whole store to _PERSIST_FILE as pretty-printed JSON.

    The snapshot is written to a sibling temp file and atomically swapped
    in with os.replace, so a crash mid-write can no longer leave a
    truncated or half-written JSON file behind (the previous direct
    write_text could).  Failures are logged, never raised — persistence
    is best-effort.
    """
    try:
        data = [s.model_dump(mode="json") for s in _DB.values()]
        tmp = _PERSIST_FILE.with_suffix(".json.tmp")
        tmp.write_text(json.dumps(data, indent=2, default=str))
        os.replace(tmp, _PERSIST_FILE)  # atomic on POSIX and Windows
    except Exception as exc:
        logger.warning("Could not persist instruction store: %s", exc)
def load_from_disk() -> None:
    """Populate _DB from the persistence file, seeding defaults if needed.

    Entries are validated into a staging dict first and committed to _DB
    only once the whole file parses cleanly.  Previously a validation
    failure mid-loop left partially-loaded entries in _DB and then
    _seed_defaults() piled the defaults on top of that partial data.
    """
    if not _PERSIST_FILE.exists():
        _seed_defaults()
        return
    try:
        raw = json.loads(_PERSIST_FILE.read_text())
        staged: Dict[str, InstructionSettings] = {}
        for entry in raw:
            s = InstructionSettings.model_validate(entry)
            staged[s.settings_id] = s
        _DB.update(staged)  # commit only after every entry validated
        logger.info("Loaded %d settings from disk.", len(_DB))
    except Exception as exc:
        logger.warning("Could not load instruction settings: %s", exc)
        _seed_defaults()
def _seed_defaults() -> None:
    """Populate the store with a starter set of instruction settings.

    Called from load_from_disk() when no persistence file exists, or when
    loading it fails.  Each entry is inserted via create(), which assigns
    ids/timestamps and persists the store after every insert.
    """
    # NOTE: the literals below are user-visible seed content — keep the
    # strings exactly as shipped.
    defaults = [
        # Frontend: React + TypeScript component generator (favorited).
        InstructionSettingsCreate(
            title="React TypeScript Component",
            description="Generates a reusable TypeScript React component with TailwindCSS styling and Jest tests.",
            instruction="Create a reusable TypeScript React component with TailwindCSS styling, a full props interface with JSDoc comments, and Jest + Testing Library unit tests. Include a Storybook story.",
            extra_context="Follow React 18 best practices: hooks, memo, forwardRef where needed. Support dark mode via Tailwind dark: prefix.",
            output_format=OutputFormat.both,
            persona=PersonaType.senior_dev,
            style=StyleType.professional,
            constraints=["TypeScript strict mode", "WCAG 2.1 AA accessibility", "Include PropTypes documentation", "Responsive design"],
            tags=["react", "typescript", "frontend", "tailwind"],
            provider=AIProvider.none,
            enhance=False,
            is_favorite=True,
        ),
        # Backend: FastAPI endpoint generator (favorited).
        InstructionSettingsCreate(
            title="FastAPI Endpoint",
            description="Generates a FastAPI endpoint with Pydantic validation, error handling, and tests.",
            instruction="Create a FastAPI endpoint with Pydantic v2 request/response models, input validation, structured error handling, OpenAPI metadata (tags, summary, description), and pytest async tests.",
            extra_context="Follow REST best practices. Use dependency injection. Include an integration test against a test database.",
            output_format=OutputFormat.both,
            persona=PersonaType.senior_dev,
            style=StyleType.detailed,
            constraints=["Python 3.11+", "PEP-8 compliant", "Type hints everywhere", "Include async tests", "Structured logging"],
            tags=["python", "fastapi", "backend", "testing"],
            provider=AIProvider.none,
            enhance=False,
            is_favorite=True,
        ),
        # Writing: structured technical article template.
        InstructionSettingsCreate(
            title="Technical Blog Post",
            description="Creates a structured technical article with code examples and actionable takeaways.",
            instruction="Write a technical blog post explaining the concept with clear H2/H3 sections, at least two runnable code examples, a TL;DR opener, and concrete actionable takeaways at the end.",
            output_format=OutputFormat.text,
            persona=PersonaType.tech_writer,
            style=StyleType.detailed,
            constraints=["800–1200 words", "Include ≥2 code examples", "Add a TL;DR section", "Link to further reading"],
            tags=["writing", "technical", "blog"],
            provider=AIProvider.none,
            enhance=False,
        ),
        # Data: SQL performance analysis and rewrite.
        InstructionSettingsCreate(
            title="SQL Query Optimiser",
            description="Analyses and rewrites slow SQL queries with explanations.",
            instruction="Analyse the provided SQL query for performance issues, rewrite it with optimisations (indexes, joins, CTEs), and explain every change with Big-O reasoning.",
            extra_context="Target PostgreSQL 15+. Use EXPLAIN ANALYZE format for before/after comparison.",
            output_format=OutputFormat.both,
            persona=PersonaType.data_scientist,
            style=StyleType.detailed,
            constraints=["PostgreSQL 15+", "Include EXPLAIN ANALYZE output", "Document index strategy"],
            tags=["sql", "database", "performance"],
            provider=AIProvider.none,
            enhance=False,
        ),
        # DevOps: containerization + pipeline scaffold.
        InstructionSettingsCreate(
            title="Docker + CI/CD Setup",
            description="Generates multi-stage Dockerfile and GitHub Actions workflow.",
            instruction="Create a production-ready multi-stage Dockerfile and a GitHub Actions CI/CD pipeline with lint, test, build, and deploy stages. Include security scanning with Trivy.",
            output_format=OutputFormat.both,
            persona=PersonaType.devops_eng,
            style=StyleType.professional,
            constraints=["Multi-stage build", "Non-root user", "Health checks", "Cache layer optimisation", "Secrets via GitHub Secrets"],
            tags=["docker", "devops", "ci/cd", "security"],
            provider=AIProvider.none,
            enhance=False,
        ),
        # ML: LLM fine-tuning walkthrough.
        InstructionSettingsCreate(
            title="LLM Fine-Tuning Guide",
            description="Step-by-step guide for fine-tuning an open-source LLM.",
            instruction="Write a step-by-step guide for fine-tuning an open-source LLM using LoRA/QLoRA on a custom dataset. Cover data preparation, training config, evaluation metrics, and deployment.",
            extra_context="Target Hugging Face + PEFT + bitsandbytes stack. Assume 1× A100 or equivalent GPU.",
            output_format=OutputFormat.both,
            persona=PersonaType.ml_engineer,
            style=StyleType.detailed,
            constraints=["Hugging Face ecosystem", "LoRA/QLoRA", "Include evaluation with BLEU/ROUGE", "Deployment section"],
            tags=["ml", "llm", "fine-tuning", "huggingface"],
            provider=AIProvider.none,
            enhance=False,
        ),
    ]
    for d in defaults:
        create(d)
    logger.info("Seeded %d default instruction settings.", len(defaults))