# PPT Script Generator — Gradio app (Hugging Face Space).
import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
import os
# Model configuration: 4-bit (Q4_K_M) GGUF build of Falcon3-1B-Instruct,
# small enough for CPU-only inference.
MODEL_REPO = "tiiuae/Falcon3-1B-Instruct-GGUF"
MODEL_FILE = "falcon3-1b-instruct-q4_k_m.gguf"
# Download and load model at import time. hf_hub_download caches under
# ./models, so later startups skip the network fetch.
print("Downloading model...")
model_path = hf_hub_download(
    repo_id=MODEL_REPO,
    filename=MODEL_FILE,
    cache_dir="./models"
)
print("Loading model...")
# Module-level singleton used by generate_ppt_script for every request.
# NOTE(review): the UI slider allows max_tokens up to 3000, which exceeds
# this 2048-token context window — very long requests may be truncated.
llm = Llama(
    model_path=model_path,
    n_ctx=2048,  # prompt + completion context window (tokens)
    n_threads=4,
    n_gpu_layers=0  # Set to higher value if GPU available
)
def generate_ppt_script(topic, num_slides=5, temperature=0.7, max_tokens=1500):
    """Generate a PowerPoint presentation script for the given topic.

    Args:
        topic: Subject of the presentation; leading/trailing whitespace is
            ignored. An empty topic short-circuits with a usage message
            instead of spending 30-60s of CPU inference on a blank prompt.
        num_slides: Number of slides to request. Coerced to int because
            Gradio sliders can deliver floats (e.g. 5.0), which would leak
            into the prompt text.
        temperature: Sampling temperature forwarded to the model.
        max_tokens: Completion-length cap; coerced to int for llama_cpp.

    Returns:
        The generated script text, stripped of surrounding whitespace, or a
        short prompt asking the user to supply a topic.
    """
    # Guard: a blank topic would only produce a meaningless completion.
    topic = (topic or "").strip()
    if not topic:
        return "Please enter a presentation topic."

    # Sliders may hand back floats; normalize before formatting/calling.
    num_slides = int(num_slides)
    max_tokens = int(max_tokens)

    prompt = f"""You are a professional presentation writer. Create a detailed PowerPoint presentation script for the topic: "{topic}"
Generate a script with {num_slides} slides. For each slide, provide:
1. Slide number and title
2. Key points to include (3-5 bullet points)
3. Speaker notes/talking points
Format the output clearly with slide numbers and sections.
Presentation Script:"""
    # Blocking CPU inference via the module-level llama_cpp model.
    output = llm(
        prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=0.9,
        repeat_penalty=1.1,
        # Stop on chat-style role markers so the model doesn't ramble into
        # a fake dialogue.
        stop=["</s>", "User:", "Human:"],
        echo=False  # return only the completion, not the prompt
    )
    return output['choices'][0]['text'].strip()
def create_interface():
    """Create the Gradio interface.

    Layout: a header, then a two-column row — input controls on the left
    (topic box, three sliders, generate button, tips) and the wider output
    textbox on the right — followed by clickable example rows and a footer.
    Returns the un-launched gr.Blocks app.
    """
    with gr.Blocks(theme=gr.themes.Soft(), title="PPT Script Generator") as demo:
        # Page header.
        gr.Markdown(
            """
            # 📊 PowerPoint Script Generator
            ### Powered by Falcon3-1B-Instruct
            Generate professional presentation scripts for any topic using AI.
            Simply enter your topic and get a complete slide-by-slide script!
            """
        )
        with gr.Row():
            with gr.Column(scale=1):
                # Free-text topic; the placeholder doubles as usage guidance.
                topic_input = gr.Textbox(
                    label="Presentation Topic",
                    placeholder="e.g., 'Introduction to Artificial Intelligence', 'Climate Change Solutions', 'Digital Marketing Strategies'",
                    lines=2
                )
                # Generation parameters, mapped 1:1 onto the keyword
                # arguments of generate_ppt_script.
                with gr.Row():
                    num_slides = gr.Slider(
                        minimum=3,
                        maximum=10,
                        value=5,
                        step=1,
                        label="Number of Slides"
                    )
                    temperature = gr.Slider(
                        minimum=0.1,
                        maximum=1.0,
                        value=0.7,
                        step=0.1,
                        label="Creativity (Temperature)"
                    )
                    # NOTE(review): maximum=3000 exceeds the model's
                    # n_ctx=2048 context window — confirm long requests
                    # behave acceptably.
                    max_tokens = gr.Slider(
                        minimum=500,
                        maximum=3000,
                        value=1500,
                        step=100,
                        label="Maximum Length (tokens)"
                    )
                generate_btn = gr.Button("🎯 Generate Script", variant="primary", size="lg")
                gr.Markdown(
                    """
                    ### Tips:
                    - Be specific with your topic for better results
                    - Use 5-7 slides for standard presentations
                    - Higher temperature = more creative output
                    - Adjust max tokens if output is cut off
                    """
                )
            with gr.Column(scale=2):
                # Result area; copy button for easy export into slides.
                output = gr.Textbox(
                    label="Generated Presentation Script",
                    lines=25,
                    show_copy_button=True
                )
        # Examples — clicking a row fills all four inputs at once.
        gr.Examples(
            examples=[
                ["Introduction to Machine Learning", 5, 0.7, 1500],
                ["The Future of Renewable Energy", 6, 0.8, 1800],
                ["Effective Team Management Strategies", 5, 0.7, 1500],
                ["Blockchain Technology Explained", 7, 0.7, 2000],
                ["Mental Health in the Workplace", 5, 0.6, 1500],
            ],
            inputs=[topic_input, num_slides, temperature, max_tokens],
            label="Example Topics"
        )
        # Connect the button to the function
        generate_btn.click(
            fn=generate_ppt_script,
            inputs=[topic_input, num_slides, temperature, max_tokens],
            outputs=output
        )
        # Footer.
        gr.Markdown(
            """
            ---
            **Note:** This app uses the Falcon3-1B-Instruct model in GGUF format for efficient CPU inference.
            Generation may take 30-60 seconds depending on the length requested.
            """
        )
    return demo
# Script entry point: build the UI and serve it. Binding 0.0.0.0 makes the
# server reachable from outside the container; 7860 is Gradio's default port.
if __name__ == "__main__":
    demo = create_interface()
    demo.launch(server_name="0.0.0.0", server_port=7860)