kulia-moon committed on
Commit
8af1117
·
verified ·
1 Parent(s): d027023

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -33,7 +33,7 @@ AVAILABLE_MODELS = {
33
  "llama": {"description": "Llama 3.3 70B (larger, good for diversity)", "speed": "Moderate"},
34
  "claude": {"description": "Claude 3.5 Haiku (via Pollinations gateway, good for chat)", "speed": "Moderate"},
35
  "qwen-coder": {"description": "Qwen 2.5 Coder 32B (coder-focused, general chat is okay)", "speed": "Moderate"},
36
- "gemma": {"description": "Gemma 7B (Google's open model, good generalist)", "speed": "Moderate"},
37
  "dbrx": {"description": "DBRX (Databricks's large open model, might be slower)", "speed": "Slow"},
38
  "mixtral": {"description": "Mixtral 8x7B (Mixture of Experts, good balance of speed/quality)", "speed": "Fast/Moderate"},
39
  "command-r": {"description": "Command R (Cohere's powerful model)", "speed": "Moderate"},
@@ -451,7 +451,7 @@ with gr.Blocks() as demo:
451
  with gr.Tabs():
452
  with gr.Tab("Generate Conversations"):
453
  with gr.Row():
454
- num_conversations_input = gr.Slider(minimum=1, maximum=200, value=3, step=1, label="Number of Conversations to Generate", info="More conversations take longer and might hit API limits.")
455
 
456
  gr.Markdown("### Model Selection")
457
  model_selector_dropdown = gr.Dropdown(
 
33
  "llama": {"description": "Llama 3.3 70B (larger, good for diversity)", "speed": "Moderate"},
34
  "claude": {"description": "Claude 3.5 Haiku (via Pollinations gateway, good for chat)", "speed": "Moderate"},
35
  "qwen-coder": {"description": "Qwen 2.5 Coder 32B (coder-focused, general chat is okay)", "speed": "Moderate"},
36
+ "openai-fast": {"description": "OpenAI Fast (via Pollinations gateway, good generalist)", "speed": "Moderate"},
37
  "dbrx": {"description": "DBRX (Databricks's large open model, might be slower)", "speed": "Slow"},
38
  "mixtral": {"description": "Mixtral 8x7B (Mixture of Experts, good balance of speed/quality)", "speed": "Fast/Moderate"},
39
  "command-r": {"description": "Command R (Cohere's powerful model)", "speed": "Moderate"},
 
451
  with gr.Tabs():
452
  with gr.Tab("Generate Conversations"):
453
  with gr.Row():
454
+ num_conversations_input = gr.Slider(minimum=1, maximum=2000, value=3, step=1, label="Number of Conversations to Generate", info="More conversations take longer and might hit API limits.")
455
 
456
  gr.Markdown("### Model Selection")
457
  model_selector_dropdown = gr.Dropdown(