# SlideMaker / app.py
# (source: Hugging Face Space by eaglelandsonce, commit b38d998)
import os
from pathlib import Path
import gradio as gr
from ctransformers import AutoModelForCausalLM
from huggingface_hub import hf_hub_download
# -----------------------------
# Configuration
# -----------------------------
# Option A: local file path inside your Space repo (recommended folder: "models/")
LOCAL_MODEL_FILE = os.environ.get("LOCAL_MODEL_FILE", "models/falcon3-1b-instruct-q4_k_m.gguf")
# Option B: download from Hugging Face Hub (set these in Space Variables)
MODEL_REPO_ID = os.environ.get("MODEL_REPO_ID", "")  # e.g. "TheBloke/Falcon3-1B-Instruct-GGUF"
MODEL_FILENAME = os.environ.get("MODEL_FILENAME", "")  # e.g. "falcon3-1b-instruct-q4_k_m.gguf"
# Inference knobs
MODEL_TYPE = os.environ.get("MODEL_TYPE", "falcon")  # ctransformers model-family tag
GPU_LAYERS = int(os.environ.get("GPU_LAYERS", "0"))  # 0 = pure CPU inference
CONTEXT_LENGTH = int(os.environ.get("CONTEXT_LENGTH", "4096"))  # max tokens of context
# -----------------------------
# Resolve model path
# -----------------------------
APP_DIR = Path(__file__).resolve().parent  # directory containing this script
# Candidate location of a GGUF committed directly to the Space repo (Option A).
local_path = (APP_DIR / LOCAL_MODEL_FILE).resolve()
def resolve_model_path() -> str:
    """Locate the GGUF model file and return its filesystem path.

    Preference order:
      1. A file committed into the Space repo at ``local_path``.
      2. A download of ``MODEL_FILENAME`` from ``MODEL_REPO_ID`` on the Hub.

    Raises:
        FileNotFoundError: when no local file exists and Hub download is
            not configured via the MODEL_REPO_ID / MODEL_FILENAME variables.
    """
    # A GGUF committed to the repo always wins over a Hub download.
    if local_path.exists():
        return str(local_path)

    # Hub fallback requires both the repo id and the filename to be set.
    if MODEL_REPO_ID and MODEL_FILENAME:
        # HF_TOKEN is optional; it is only needed for private/gated repos.
        return hf_hub_download(
            repo_id=MODEL_REPO_ID,
            filename=MODEL_FILENAME,
            token=os.environ.get("HF_TOKEN"),
        )

    raise FileNotFoundError(
        f"Model not found locally at: {local_path}\n\n"
        "AND Hub download is not configured.\n"
        "Fix ONE of these:\n"
        "A) Commit the GGUF into your Space repo (recommended: put it in /models).\n"
        " - Ensure it exists at the path in LOCAL_MODEL_FILE.\n"
        "B) Configure Hub download:\n"
        " - In Space Settings > Variables:\n"
        " MODEL_REPO_ID = <your repo_id>\n"
        " MODEL_FILENAME = <your .gguf filename>\n"
        " - If repo is gated/private, add Space Secret:\n"
        " HF_TOKEN = <your HF access token>\n"
    )
# Resolve once at import time; fails fast with a clear message if missing.
MODEL_PATH = resolve_model_path()
# -----------------------------
# Load model
# -----------------------------
# Load the GGUF weights via ctransformers at import time so the Space is
# ready to serve as soon as the UI comes up.
llm = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    model_type=MODEL_TYPE,
    gpu_layers=GPU_LAYERS,
    context_length=CONTEXT_LENGTH,
)
# -----------------------------
# Prompt template
# -----------------------------
# System preamble prepended to every generation request; .strip() drops the
# leading/trailing newlines the triple-quoted literal carries.
SYSTEM_PROMPT = """
You are an expert instructional designer who writes clear,
engaging PowerPoint scripts with slide titles, bullet points,
and speaker notes.
""".strip()
def build_prompt(topic: str, slide_count: int) -> str:
    """Return the full generation prompt for *topic* with *slide_count* slides.

    The template shows the expected output structure for the title slide and
    the first two content slides, then instructs the model to continue the
    same pattern up to slide ``slide_count``.
    """
    return f"""{SYSTEM_PROMPT}
Write a complete PowerPoint script with EXACTLY {slide_count} slides.
Topic: {topic}
Return the output in this structure:
Title Slide:
- Title:
- Subtitle:
- Speaker Notes:
Slide 1:
- Slide Title:
- Bullet Points:
- Speaker Notes:
Slide 2:
- Slide Title:
- Bullet Points:
- Speaker Notes:
Continue until you reach Slide {slide_count}.
"""
# -----------------------------
# Generation function
# -----------------------------
def generate_script(topic, slide_count, max_tokens, temperature, top_p):
    """Produce a slide-by-slide PowerPoint script for *topic*.

    Validates the inputs with early returns (returning a human-readable
    error message instead of raising), then streams tokens from the model
    and returns the assembled script with surrounding whitespace trimmed.
    """
    # Guard: an empty or whitespace-only topic cannot yield a useful script.
    cleaned_topic = (topic or "").strip()
    if not cleaned_topic:
        return "Please enter a topic."

    # Coerce the slide count; gr.Number may deliver a float (or None).
    try:
        n_slides = int(slide_count)
    except Exception:
        return "Number of slides must be a whole number."

    if not 1 <= n_slides <= 50:
        return "Number of slides must be between 1 and 50."

    prompt = build_prompt(cleaned_topic, n_slides)

    # Stream tokens from the model and assemble them in a single pass.
    stream = llm(
        prompt,
        max_new_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        stream=True,
    )
    return "".join(stream).strip()
# -----------------------------
# Gradio UI
# -----------------------------
def build_ui():
    """Assemble and return the Gradio Blocks interface.

    Layout: a header, then a left column of inputs (topic, slide count,
    sampling knobs, generate button) and a right column with the output
    textbox. The button is wired to :func:`generate_script`.
    """
    with gr.Blocks(title="Falcon3-1B PPT Script Generator") as demo:
        gr.Markdown(
            """
# Falcon3-1B-Instruct (GGUF)
### PowerPoint Script Generator (Topic + Slide Count)
Provide a topic and the number of slides you want.
The model will generate a structured slide-by-slide script.
"""
        )
        with gr.Row():
            with gr.Column():
                # Input controls; defaults mirror generate_script's limits.
                topic = gr.Textbox(label="Topic", placeholder="e.g., AI Maturity Model")
                slide_count = gr.Number(label="Number of Slides", value=8, precision=0)
                max_tokens = gr.Slider(256, 4096, value=1500, label="Max Tokens")
                temperature = gr.Slider(0.1, 1.5, value=0.7, label="Temperature")
                top_p = gr.Slider(0.1, 1.0, value=0.9, label="Top-p")
                generate_btn = gr.Button("Generate Script")
            with gr.Column():
                # Output pane for the generated script.
                output = gr.Textbox(label="Generated PowerPoint Script", lines=25)
        # Wire the button click to the generation function.
        generate_btn.click(
            generate_script,
            inputs=[topic, slide_count, max_tokens, temperature, top_p],
            outputs=output,
        )
    return demo
# Build the app at import time (Spaces expects a module-level `demo`).
demo = build_ui()

if __name__ == "__main__":
    # Bind to all interfaces; honor the platform-provided PORT, default 7860.
    port = int(os.environ.get("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)