Spaces:
Sleeping
Sleeping
File size: 5,988 Bytes
0f0f77d f618fe2 03f54fd f618fe2 3def084 edf06e3 22848f7 f618fe2 22848f7 f618fe2 1146eca edf06e3 03f54fd 1146eca 03f54fd 1146eca f618fe2 1146eca edf06e3 1146eca edf06e3 f618fe2 0f7a3c5 806fc86 08709e7 0f7a3c5 f618fe2 08709e7 f618fe2 0f7a3c5 f618fe2 300284a 0f7a3c5 300284a ca1ba9e f618fe2 1146eca edf06e3 f618fe2 1146eca 03f54fd edf06e3 f618fe2 1146eca f618fe2 edf06e3 f618fe2 08709e7 f618fe2 0f7a3c5 08709e7 f618fe2 1146eca 0f0f77d f618fe2 0f0f77d f618fe2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 |
import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
import time
# --- Configuration ---
# HF Hub repo and GGUF filename of the checkpoint to serve.
# (Trailing fragments after '#' are previously-tried alternatives kept for reference.)
MODEL_REPO = "Kezovic/iris-q4gguf-baseline-10k"#iris-f16gguf-test" #iris-q4gguf-lora-test" #iris-q4gguf-baseline-10k"
MODEL_FILE = "llama-3.2-1b-instruct.Q4_K_M.gguf"#llama-3.2-1b-instruct.F16.gguf"#Llama-3.2-1B-Instruct.Q4_K_M.gguf"#llama-3.2-1b-instruct.Q4_K_M.gguf"
CONTEXT_WINDOW = 2048  # llama.cpp context length (n_ctx), in tokens
MAX_NEW_TOKENS = 400  # cap on tokens generated per poem
TEMPERATURE = 1.5  # high sampling temperature -> more varied output
# --- Model Loading ---
llm = None  # set by load_llm(); stays None if download/load fails
def load_llm():
    """Fetch the GGUF checkpoint from the Hub and start the llama.cpp runtime.

    Best-effort: on any failure the error is printed and the module-level
    ``llm`` remains ``None`` so the UI can report it instead of crashing.
    """
    global llm
    print("Downloading model...")
    try:
        local_path = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILE)
        llm = Llama(
            model_path=local_path,
            n_ctx=CONTEXT_WINDOW,
            n_threads=2,
            verbose=False,
            min_p=0.1,
        )
    except Exception as exc:
        print(f"Error loading model: {exc}")
    else:
        print("Model loaded successfully!")


# Load eagerly at import time so the Space is ready before the first request.
load_llm()
# --- Generation Function ---
def generate_poem(format_type, persona, topic, progress=gr.Progress()):
    """Compose a poem of *format_type*, in the voice of *persona*, about *topic*.

    Returns the generated text, or a short error string when the model is
    unavailable or no topic was supplied.
    """
    # Surface the progress bar immediately (the output Textbox is always
    # rendered, so the bar has a home from the very first frame).
    progress(0, desc="Consulting the Muse...")
    time.sleep(0.2)  # tiny pause so the eye catches the bar

    # Guard clauses: a loaded model and a non-empty topic are required.
    if not llm:
        return "Error: Model not loaded."
    if not topic:
        return "Please enter a topic!"

    progress(0.2, desc=f"Summoning {persona}...")
    time.sleep(0.3)

    # System-style voice for each selectable persona.
    voice_by_persona = {
        "Grumpy Pirate": "You are a grumpy and annoyed pirate captain. Use salty nautical slang, complain about the sea and use 'Arrr' and 'matey'.",
        "Philosopher": "You are a philosopher. Use metaphors and reflective musings about the human condition.",
        "Ancient Wizard": "You are an ancient wizard. Speak in mystical tones. Use references to magic.",
        "Shakespearean Actor": "You are a Shakespearean actor. Use Early Modern English (thee, thou) and dramatic flair.",
        "Sarcastic Teenager": "You are a sarcastic teenager. Use dry humor."
    }
    # Structural instructions for each poem format.
    rules_by_format = {
        "Free Verse": "Write in Free Verse about the topic. Focus on vivid imagery and emotion.",
        "Ballad": "Write a Ballad about the topic. It should tell a narrative story with a clear beginning, middle, and end. Use four-line stanzas (quatrains) and a simple rhyme scheme like A-B-C-B. Keep the language simple and musical.",
        "Ode": "Write an Ode about the topic. Use elevated, expressive language to praise or celebrate the subject. Focus on strong emotion, rich imagery, and admiration.",
        "Elegy": "Write an Elegy about the topic. Use a somber, reflective tone to mourn a loss or contemplate death. Focus on sorrow, remembrance, and emotional depth.",
        "Hymn": "Write a Hymn about the topic. Use a reverent, uplifting tone. Focus on praise, devotion, or spiritual reflection, with a rhythmic, chant-like flow.",
        "Epic": "Write an Epic about the topic. Use grand, dramatic language to tell a heroic story."
    }

    voice = voice_by_persona.get(persona, "You are a helpful assistant.")
    rules = rules_by_format.get(format_type, "Write a poem.")

    # Assemble the prompt; trailing blank line separates prompt from output.
    full_prompt = "\n".join([
        "Write a poem with rhymes.",
        voice,
        rules,
        "Your response should only contain the poem.",
        f"The {format_type} should focus on this topic: '{topic}'.",
    ]) + "\n\n"

    progress(0.4, desc="Drafting Masterpiece...")
    completion = llm(
        prompt=full_prompt,
        max_tokens=MAX_NEW_TOKENS,
        temperature=TEMPERATURE,
        stop=["### Instruction:", "### Human:"],
        echo=False,
    )

    progress(0.9, desc="Polishing rhymes...")
    time.sleep(0.2)
    return completion['choices'][0]['text'].strip()
# --- UI Layout ---
with gr.Blocks(title="The Poetry Workshop", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🖋️ The Poetry Workshop")
    gr.Markdown("Your own personal muse to help you get started with poetry.")

    with gr.Group():
        # Sentence-style controls: "I want to write a <type> in the style of <persona>"
        with gr.Row(equal_height=True):
            gr.Markdown("### I want to write a ")
            poem_type_dd = gr.Dropdown(
                choices=["Ballad","Ode","Elegy","Hymn","Epic", "Free Verse"],
                value="Ballad",
                label="Poem Type",
                show_label=False,
                container=False,
                scale=2,
            )
            gr.Markdown("### in the style of ")
            persona_dd = gr.Dropdown(
                choices=["Grumpy Pirate", "Philosopher", "Shakespearean Actor", "Ancient Wizard", "Sarcastic Teenager"],
                value="Grumpy Pirate",
                label="Persona",
                show_label=False,
                container=False,
                scale=3,
            )
        # "... about this topic: <free text>" plus the trigger button.
        with gr.Row(equal_height=True):
            gr.Markdown("### about this topic: ")
            topic_box = gr.Textbox(
                placeholder="e.g., my broken laptop, the smell of rain, a lost sock",
                label="Topic",
                show_label=False,
                scale=5,
            )
            generate_button = gr.Button("✨ Create Masterpiece", variant="primary", scale=1)

    gr.Markdown("---")

    # Deliberately a Textbox (not Markdown) with a fixed height so the
    # component exists on page load and the progress bar has a place to render.
    poem_output = gr.Textbox(
        label="Your Poem",
        lines=12,
        interactive=False,
        placeholder="Your masterpiece will appear here...",
    )

    generate_button.click(
        fn=generate_poem,
        inputs=[poem_type_dd, persona_dd, topic_box],
        outputs=[poem_output],
    )

if __name__ == "__main__":
    demo.launch()