Spaces:
Sleeping
Sleeping
fix
Browse files — gemmademo/_chat.py (+3 −4)
gemmademo/_chat.py
CHANGED
|
@@ -26,20 +26,19 @@ class GradioChat:
|
|
| 26 |
return LlamaCppGemmaModel(name=model_name).load_model()
|
| 27 |
|
| 28 |
def _chat(self):
|
| 29 |
-
def chat_fn(message, history, selected_model):
|
| 30 |
if selected_model != self.current_model_name:
|
| 31 |
self.current_model_name = selected_model
|
| 32 |
self.model = self._load_model(selected_model) # Reload model when changed
|
| 33 |
|
| 34 |
-
prompt = self.prompt_manager.get_prompt(user_input=message)
|
| 35 |
response = self.model.generate_response(prompt)
|
| 36 |
return response
|
| 37 |
|
| 38 |
chat_interface = gr.ChatInterface(
|
| 39 |
chat_fn,
|
| 40 |
-
textbox=gr.Textbox(placeholder="… [old line truncated in page capture; replaced by the new-version line below]
|
| 41 |
additional_inputs=[
|
| 42 |
-
gr.State(self.current_model_name), # Store selected model state
|
| 43 |
gr.Dropdown(choices=self.model_options, value=self.current_model_name, label="Select Gemma Model"),
|
| 44 |
gr.Dropdown(choices=self.task_options, value="Question Answering", label="Select Task"),
|
| 45 |
],
|
|
|
|
| 26 |
return LlamaCppGemmaModel(name=model_name).load_model()
|
| 27 |
|
| 28 |
def _chat(self):
|
| 29 |
+
def chat_fn(message, history, selected_model, selected_task):
|
| 30 |
if selected_model != self.current_model_name:
|
| 31 |
self.current_model_name = selected_model
|
| 32 |
self.model = self._load_model(selected_model) # Reload model when changed
|
| 33 |
|
| 34 |
+
prompt = self.prompt_manager.get_prompt(user_input=message, task=selected_task)
|
| 35 |
response = self.model.generate_response(prompt)
|
| 36 |
return response
|
| 37 |
|
| 38 |
chat_interface = gr.ChatInterface(
|
| 39 |
chat_fn,
|
| 40 |
+
textbox=gr.Textbox(placeholder="Ask me something...", container=False),
|
| 41 |
additional_inputs=[
|
|
|
|
| 42 |
gr.Dropdown(choices=self.model_options, value=self.current_model_name, label="Select Gemma Model"),
|
| 43 |
gr.Dropdown(choices=self.task_options, value="Question Answering", label="Select Task"),
|
| 44 |
],
|