Wizz13150 committed on
Commit
42d184c
·
1 Parent(s): b8a5462

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -34
app.py CHANGED
@@ -2,13 +2,32 @@ import gradio as gr
2
  import random
3
  from llama_cpp import Llama
4
 
5
- # Charger le modèle
6
- llm = Llama(
7
- model_path="model/WizzGPTv7-q8_0.gguf",
8
- n_threads=2,
9
- n_ctx=1024,
10
- use_mlock=True
11
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  # Liste complète des débuts de prompt
14
  prompt_list = [
@@ -285,7 +304,7 @@ prompt_list = [
285
  "A visually arresting photographic study of "
286
  ]
287
 
288
- # Generation function
289
  def generate_completions(prompt, n_responses, max_tokens, temperature, top_p, top_k, repeat_penalty):
290
  results = []
291
  for _ in range(n_responses):
@@ -302,7 +321,7 @@ def generate_completions(prompt, n_responses, max_tokens, temperature, top_p, to
302
  results.append(f"{prompt}{text}")
303
  return "\n\n".join(results)
304
 
305
- # UI logic
306
  def set_random_prompt(n):
307
  selected = random.choice(prompt_list)
308
  return selected, f"🎲 Random Prompt{'s' if n > 1 else ''}", f"🚀 Generate Prompt{'s' if n > 1 else ''}"
@@ -310,22 +329,21 @@ def set_random_prompt(n):
310
  def set_prompt_from_dropdown(choice, n):
311
  return choice, f"🎲 Random Prompt{'s' if n > 1 else ''}", f"🚀 Generate Prompt{'s' if n > 1 else ''}"
312
 
313
- # Gradio Interface
314
- with gr.Blocks(css=".gr-button { font-size: 16px; }") as demo:
315
- gr.Markdown("""
316
- # ✨ WizzGPTv7 — Text Completion (GGUF - CPU)
317
 
318
- **Lightweight GGUF model (Q8_0), optimized for CPU.**
319
- Works in *completion* mode (not chatbot), returning full prompts + outputs.
 
320
 
321
- - Click `🎲 Random Prompt` for a random start
322
- - Use the dropdown to pick from hundreds of prompt starters
323
- - Adjust generation settings with the sliders below
324
- """)
325
 
326
  with gr.Group():
327
  with gr.Row():
328
- prompt_input = gr.Textbox(label="📝 Prompt", lines=3, placeholder="Type a prompt or pick one from the list...")
329
  with gr.Row():
330
  prompt_dropdown = gr.Dropdown(choices=prompt_list, label="📌 Prompt Examples", scale=2)
331
  random_button = gr.Button("🎲 Random Prompt", scale=1)
@@ -335,26 +353,20 @@ with gr.Blocks(css=".gr-button { font-size: 16px; }") as demo:
335
 
336
  with gr.Accordion("⚙️ Advanced Settings", open=False):
337
  max_tokens = gr.Slider(10, 200, value=75, step=5, label="Max Tokens")
338
- temperature = gr.Slider(0.1, 2.0, value=1.1, step=0.05, label="Temperature")
339
  top_p = gr.Slider(0.0, 1.0, value=0.90, step=0.05, label="Top-p")
340
- top_k = gr.Slider(0, 128, value=40, step=1, label="Top-k")
341
- repeat_penalty = gr.Slider(0.5, 2.0, value=1.32, step=0.05, label="Repeat Penalty")
342
 
343
  generate_button = gr.Button("🚀 Generate Prompt(s)")
344
  output = gr.Textbox(label="📄 Completions", lines=12)
345
 
346
- # Reactive logic
347
  prompt_dropdown.change(set_prompt_from_dropdown, [prompt_dropdown, n_responses], [prompt_input, random_button, generate_button])
348
  random_button.click(set_random_prompt, inputs=n_responses, outputs=[prompt_input, random_button, generate_button])
349
- n_responses.change(
350
- lambda n: (f"🎲 Random Prompt{'s' if n > 1 else ''}", f"🚀 Generate Prompt{'s' if n > 1 else ''}"),
351
- inputs=n_responses,
352
- outputs=[random_button, generate_button]
353
- )
354
- generate_button.click(
355
- fn=generate_completions,
356
- inputs=[prompt_input, n_responses, max_tokens, temperature, top_p, top_k, repeat_penalty],
357
- outputs=output
358
- )
359
 
360
  demo.launch()
 
2
  import random
3
  from llama_cpp import Llama
4
 
5
# Available model checkpoints (GGUF, Q8_0 quantization).
model_versions = {
    "WizzGPTv2": "model/WizzGPTv2-q8_0.gguf",
    "WizzGPTv3": "model/WizzGPTv3-q8_0.gguf",
    "WizzGPTv4": "model/WizzGPTv4-q8_0.gguf",
    "WizzGPTv5": "model/WizzGPTv5-q8_0.gguf",
    "WizzGPTv6": "model/WizzGPTv6-q8_0.gguf",
    "WizzGPTv7": "model/WizzGPTv7-q8_0.gguf"  # default
}

# Module-level state: the active Llama instance and its version key.
llm = None
current_model = "WizzGPTv7"

def load_model(model_name):
    """Load the GGUF model named *model_name* into the global ``llm``.

    Keeps ``current_model`` in sync with the loaded checkpoint. Loading a
    GGUF file is expensive, so re-requesting the already-active model is a
    no-op (the UI's model selector can fire with the current choice).

    Raises:
        KeyError: if *model_name* is not a key of ``model_versions``.
    """
    global llm, current_model
    # Idempotence guard: skip the costly reload when nothing changes.
    if llm is not None and model_name == current_model:
        return
    current_model = model_name
    llm = Llama(
        model_path=model_versions[model_name],
        n_threads=2,
        n_ctx=1024,
        use_mlock=True  # pin model memory to avoid swapping on CPU inference
    )

# Load the default model at startup.
load_model(current_model)
31
 
32
  # Liste complète des débuts de prompt
33
  prompt_list = [
 
304
  "A visually arresting photographic study of "
305
  ]
306
 
307
+ # Fonction de génération
308
  def generate_completions(prompt, n_responses, max_tokens, temperature, top_p, top_k, repeat_penalty):
309
  results = []
310
  for _ in range(n_responses):
 
321
  results.append(f"{prompt}{text}")
322
  return "\n\n".join(results)
323
 
324
+ # Fonctions UI
325
def set_random_prompt(n):
    """Pick a random starter prompt and refresh the two button labels.

    Returns a (prompt, random-button-label, generate-button-label) tuple;
    labels are pluralized when *n* > 1.
    """
    plural = "s" if n > 1 else ""
    prompt = random.choice(prompt_list)
    return prompt, f"🎲 Random Prompt{plural}", f"🚀 Generate Prompt{plural}"
 
329
def set_prompt_from_dropdown(choice, n):
    """Echo the dropdown selection into the prompt box and refresh labels.

    Returns a (prompt, random-button-label, generate-button-label) tuple;
    labels are pluralized when *n* > 1.
    """
    plural = "s" if n > 1 else ""
    return choice, f"🎲 Random Prompt{plural}", f"🚀 Generate Prompt{plural}"
331
 
332
def on_model_change(model_name):
    """Swap the active model and return a status string for the UI."""
    load_model(model_name)
    status = f"Model loaded: {model_name}"
    return status
 
335
 
336
+ # Interface Gradio
337
+ with gr.Blocks(css=".gr-button { font-size: 16px; }") as demo:
338
+ gr.Markdown("## ✨ WizzGPT — Text Completion (GGUF - CPU)")
339
 
340
+ with gr.Row():
341
+ model_selector = gr.Dropdown(choices=list(model_versions.keys()), value="WizzGPTv7", label="🧠 Select Model")
342
+ model_status = gr.Textbox(value=f"Model loaded: {current_model}", label="Status", interactive=False)
 
343
 
344
  with gr.Group():
345
  with gr.Row():
346
+ prompt_input = gr.Textbox(label="📝 Prompt", lines=3, placeholder="Type or pick a prompt...")
347
  with gr.Row():
348
  prompt_dropdown = gr.Dropdown(choices=prompt_list, label="📌 Prompt Examples", scale=2)
349
  random_button = gr.Button("🎲 Random Prompt", scale=1)
 
353
 
354
  with gr.Accordion("⚙️ Advanced Settings", open=False):
355
  max_tokens = gr.Slider(10, 200, value=75, step=5, label="Max Tokens")
356
+ temperature = gr.Slider(0.1, 2.0, value=1.25, step=0.05, label="Temperature")
357
  top_p = gr.Slider(0.0, 1.0, value=0.90, step=0.05, label="Top-p")
358
+ top_k = gr.Slider(0, 128, value=128, step=1, label="Top-k")
359
+ repeat_penalty = gr.Slider(0.5, 2.0, value=1.4, step=0.05, label="Repeat Penalty")
360
 
361
  generate_button = gr.Button("🚀 Generate Prompt(s)")
362
  output = gr.Textbox(label="📄 Completions", lines=12)
363
 
364
+ # Interactions
365
  prompt_dropdown.change(set_prompt_from_dropdown, [prompt_dropdown, n_responses], [prompt_input, random_button, generate_button])
366
  random_button.click(set_random_prompt, inputs=n_responses, outputs=[prompt_input, random_button, generate_button])
367
+ n_responses.change(lambda n: (f"🎲 Random Prompt{'s' if n > 1 else ''}", f"🚀 Generate Prompt{'s' if n > 1 else ''}"), inputs=n_responses, outputs=[random_button, generate_button])
368
+ generate_button.click(fn=generate_completions, inputs=[prompt_input, n_responses, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=output)
369
+
370
+ model_selector.change(on_model_change, inputs=model_selector, outputs=model_status)
 
 
 
 
 
 
371
 
372
  demo.launch()