Update app.py
Browse files
app.py
CHANGED
|
@@ -334,22 +334,27 @@ def on_model_change(model_name):
|
|
| 334 |
return f"Model loaded: {model_name}"
|
| 335 |
|
| 336 |
# Interface Gradio
|
| 337 |
-
with gr.Blocks(css="
|
|
|
|
|
|
|
|
|
|
|
|
|
| 338 |
gr.Markdown("## β¨ WizzGPT β Text Completion (GGUF - CPU)")
|
| 339 |
|
| 340 |
with gr.Row():
|
| 341 |
-
model_selector = gr.Dropdown(choices=list(model_versions.keys()), value="WizzGPTv7", label="π§
|
| 342 |
model_status = gr.Textbox(value=f"Model loaded: {current_model}", label="Status", interactive=False)
|
| 343 |
|
| 344 |
-
with gr.
|
| 345 |
-
|
| 346 |
-
prompt_input = gr.Textbox(label="π Prompt", lines=3, placeholder="Type or pick a prompt...")
|
| 347 |
-
with gr.Row():
|
| 348 |
-
prompt_dropdown = gr.Dropdown(choices=prompt_list, label="π Prompt Examples", scale=2)
|
| 349 |
-
random_button = gr.Button("π² Random Prompt", scale=1)
|
| 350 |
|
| 351 |
with gr.Row():
|
| 352 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 353 |
|
| 354 |
with gr.Accordion("βοΈ Advanced Settings", open=False):
|
| 355 |
max_tokens = gr.Slider(10, 200, value=75, step=5, label="Max Tokens")
|
|
@@ -358,15 +363,13 @@ with gr.Blocks(css=".gr-button { font-size: 16px; }") as demo:
|
|
| 358 |
top_k = gr.Slider(0, 128, value=128, step=1, label="Top-k")
|
| 359 |
repeat_penalty = gr.Slider(0.5, 2.0, value=1.4, step=0.05, label="Repeat Penalty")
|
| 360 |
|
| 361 |
-
|
| 362 |
-
output = gr.Textbox(label="π Completions", lines=12)
|
| 363 |
|
| 364 |
-
#
|
| 365 |
prompt_dropdown.change(set_prompt_from_dropdown, [prompt_dropdown, n_responses], [prompt_input, random_button, generate_button])
|
| 366 |
random_button.click(set_random_prompt, inputs=n_responses, outputs=[prompt_input, random_button, generate_button])
|
|
|
|
| 367 |
n_responses.change(lambda n: (f"π² Random Prompt{'s' if n > 1 else ''}", f"π Generate Prompt{'s' if n > 1 else ''}"), inputs=n_responses, outputs=[random_button, generate_button])
|
| 368 |
-
generate_button.click(fn=generate_completions, inputs=[prompt_input, n_responses, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=output)
|
| 369 |
-
|
| 370 |
model_selector.change(on_model_change, inputs=model_selector, outputs=model_status)
|
| 371 |
|
| 372 |
demo.launch()
|
|
|
|
| 334 |
return f"Model loaded: {model_name}"
|
| 335 |
|
# Interface Gradio
# NOTE(review): this span was reconstructed from a diff-view text dump; the
# emoji in the UI label strings appear mojibake'd (e.g. "β¨", "π²") — they are
# reproduced byte-for-byte here, but confirm against the original file's UTF-8.
with gr.Blocks(css="""
.gr-button { font-size: 15px; padding: 0.6em 1.2em; }
.gr-textbox textarea { font-size: 15px; }
""") as demo:

    gr.Markdown("## β¨ WizzGPT β Text Completion (GGUF - CPU)")

    # Model selection row: the dropdown drives on_model_change (wired below);
    # the read-only status box echoes which GGUF model is currently loaded.
    with gr.Row():
        model_selector = gr.Dropdown(choices=list(model_versions.keys()), value="WizzGPTv7", label="π§ Model")
        model_status = gr.Textbox(value=f"Model loaded: {current_model}", label="Status", interactive=False)

    # Free-form prompt entry.
    with gr.Row():
        prompt_input = gr.Textbox(label="π Prompt", lines=2, placeholder="Type or select a prompt...")

    # Prompt pickers and action buttons. The button labels are rewritten at
    # runtime by the n_responses.change binding below (singular/plural text).
    with gr.Row():
        prompt_dropdown = gr.Dropdown(choices=prompt_list, label="π Examples")
        random_button = gr.Button("π²", size="sm", elem_id="random-btn", min_width=30)
        generate_button = gr.Button("π", size="sm", elem_id="generate-btn")

    # How many completions to generate per click (1..7).
    with gr.Row():
        n_responses = gr.Slider(1, 7, value=1, step=1, label="Number")

    # Sampling hyper-parameters, collapsed by default.
    with gr.Accordion("βοΈ Advanced Settings", open=False):
        max_tokens = gr.Slider(10, 200, value=75, step=5, label="Max Tokens")
        # NOTE(review): the diff view elided two lines here. Judging from the
        # generate_button.click inputs they defined `temperature` and `top_p`;
        # the defaults below are placeholders — TODO confirm against the full file.
        temperature = gr.Slider(0.1, 2.0, value=0.8, step=0.05, label="Temperature")
        top_p = gr.Slider(0.0, 1.0, value=0.95, step=0.01, label="Top-p")
        top_k = gr.Slider(0, 128, value=128, step=1, label="Top-k")
        repeat_penalty = gr.Slider(0.5, 2.0, value=1.4, step=0.05, label="Repeat Penalty")

    output = gr.Textbox(label="π Completions", lines=10)

    # Event bindings
    prompt_dropdown.change(set_prompt_from_dropdown, [prompt_dropdown, n_responses], [prompt_input, random_button, generate_button])
    random_button.click(set_random_prompt, inputs=n_responses, outputs=[prompt_input, random_button, generate_button])
    generate_button.click(generate_completions, inputs=[prompt_input, n_responses, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=output)
    n_responses.change(lambda n: (f"π² Random Prompt{'s' if n > 1 else ''}", f"π Generate Prompt{'s' if n > 1 else ''}"), inputs=n_responses, outputs=[random_button, generate_button])
    model_selector.change(on_model_change, inputs=model_selector, outputs=model_status)

# Top-level launch (Hugging Face Spaces convention: no __main__ guard).
demo.launch()
|