Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -15,8 +15,8 @@ def respond(
|
|
| 15 |
top_p,
|
| 16 |
frequency_penalty,
|
| 17 |
seed,
|
| 18 |
-
custom_model,
|
| 19 |
-
provider,
|
| 20 |
model_search_term,
|
| 21 |
selected_model
|
| 22 |
):
|
|
@@ -25,8 +25,8 @@ def respond(
|
|
| 25 |
print(f"System message: {system_message}")
|
| 26 |
print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
|
| 27 |
print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
|
| 28 |
-
print(f"Selected model (custom_model): {custom_model}")
|
| 29 |
-
print(f"Selected provider: {provider}")
|
| 30 |
print(f"Model search term: {model_search_term}")
|
| 31 |
print(f"Selected model from radio: {selected_model}")
|
| 32 |
|
|
@@ -149,14 +149,6 @@ seed_slider = gr.Slider(
|
|
| 149 |
label="Seed (-1 for random)"
|
| 150 |
)
|
| 151 |
|
| 152 |
-
# Custom model box
|
| 153 |
-
custom_model_box = gr.Textbox(
|
| 154 |
-
value="",
|
| 155 |
-
label="Custom Model",
|
| 156 |
-
info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
|
| 157 |
-
placeholder="meta-llama/Llama-3.3-70B-Instruct"
|
| 158 |
-
)
|
| 159 |
-
|
| 160 |
# Provider selection
|
| 161 |
providers_list = [
|
| 162 |
"hf-inference", # Default Hugging Face Inference
|
|
@@ -178,6 +170,14 @@ provider_radio = gr.Radio(
|
|
| 178 |
info="Select which inference provider to use. Uses your Hugging Face PRO credits."
|
| 179 |
)
|
| 180 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 181 |
# Model selection components
|
| 182 |
model_search_box = gr.Textbox(
|
| 183 |
label="Filter Models",
|
|
@@ -279,4 +279,4 @@ print("Gradio interface initialized.")
|
|
| 279 |
|
| 280 |
if __name__ == "__main__":
|
| 281 |
print("Launching the demo application.")
|
| 282 |
-
demo.launch(show_api=True)
|
|
|
|
| 15 |
top_p,
|
| 16 |
frequency_penalty,
|
| 17 |
seed,
|
| 18 |
+
provider, # Moved before custom_model
|
| 19 |
+
custom_model, # Moved after provider
|
| 20 |
model_search_term,
|
| 21 |
selected_model
|
| 22 |
):
|
|
|
|
| 25 |
print(f"System message: {system_message}")
|
| 26 |
print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
|
| 27 |
print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
|
| 28 |
+
print(f"Selected provider: {provider}") # Updated order
|
| 29 |
+
print(f"Selected model (custom_model): {custom_model}") # Updated order
|
| 30 |
print(f"Model search term: {model_search_term}")
|
| 31 |
print(f"Selected model from radio: {selected_model}")
|
| 32 |
|
|
|
|
| 149 |
label="Seed (-1 for random)"
|
| 150 |
)
|
| 151 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 152 |
# Provider selection
|
| 153 |
providers_list = [
|
| 154 |
"hf-inference", # Default Hugging Face Inference
|
|
|
|
| 170 |
info="Select which inference provider to use. Uses your Hugging Face PRO credits."
|
| 171 |
)
|
| 172 |
|
| 173 |
+
# Custom model box
|
| 174 |
+
custom_model_box = gr.Textbox(
|
| 175 |
+
value="",
|
| 176 |
+
label="Custom Model",
|
| 177 |
+
info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
|
| 178 |
+
placeholder="meta-llama/Llama-3.3-70B-Instruct"
|
| 179 |
+
)
|
| 180 |
+
|
| 181 |
# Model selection components
|
| 182 |
model_search_box = gr.Textbox(
|
| 183 |
label="Filter Models",
|
|
|
|
| 279 |
|
| 280 |
if __name__ == "__main__":
|
| 281 |
print("Launching the demo application.")
|
| 282 |
+
demo.launch(show_api=True) # Fixed typo: demo. Launch -> demo.launch
|