aadya1762 committed on
Commit
da328b0
·
1 Parent(s): e96d38d

UI cleanup

Browse files
Files changed (1) hide show
  1. gemmademo/_chat.py +98 -80
gemmademo/_chat.py CHANGED
@@ -102,27 +102,30 @@ class GradioChat:
102
 
103
  with gr.Blocks() as demo:
104
  with gr.Row():
105
- with gr.Column(scale=3): # Sidebar column
106
- gr.Markdown(
107
- "## Google Gemma Models: lightweight, state-of-the-art open models from Google"
108
- )
109
- task_dropdown = gr.Dropdown(
110
- choices=self.task_options,
111
- value=self.current_task_name,
112
- label="Select Task",
113
- )
114
- model_dropdown = gr.Dropdown(
115
- choices=self.model_options,
116
- value=self.current_model_name,
117
- label="Select Gemma Model",
118
- )
119
- chat_interface = gr.ChatInterface(
120
- chat_fn,
121
- additional_inputs=[model_dropdown, task_dropdown],
122
- textbox=gr.Textbox(
123
- placeholder="Ask me something...", container=False
124
- ),
125
- )
 
 
 
126
 
127
  with gr.Column(scale=1):
128
  gr.Markdown(
@@ -144,65 +147,80 @@ class GradioChat:
144
  task_dropdown.change(
145
  _update_examples, task_dropdown, examples_list.dataset
146
  )
147
-
148
- temperature_slider = gr.Slider(
149
- minimum=0.1, maximum=2, value=self.model.temperature, label="Temperature"
150
- )
151
- gr.Markdown(
152
- "**Temperature:** Lower values make the output more deterministic."
153
- )
154
- temperature_slider.change(
155
- fn=lambda temp: setattr(self.model, "temperature", temp),
156
- inputs=temperature_slider,
157
- )
158
-
159
- top_p_slider = gr.Slider(
160
- minimum=0.1, maximum=1.0, value=self.model.top_p, label="Top P"
161
- )
162
- gr.Markdown(
163
- "**Top P:** Lower values make the output more focused."
164
- )
165
- top_p_slider.change(
166
- fn=lambda top_p: setattr(self.model, "top_p", top_p),
167
- inputs=top_p_slider,
168
- )
169
-
170
- top_k_slider = gr.Slider(
171
- minimum=1, maximum=100, value=self.model.top_k, label="Top K"
172
- )
173
- gr.Markdown(
174
- "**Top K:** Lower values make the output more focused."
175
- )
176
- top_k_slider.change(
177
- fn=lambda top_k: setattr(self.model, "top_k", top_k),
178
- inputs=top_k_slider,
179
- )
180
-
181
- repetition_penalty_slider = gr.Slider(
182
- minimum=1.0, maximum=2.0, value=self.model.repeat_penalty, label="Repetition Penalty"
183
- )
184
- gr.Markdown(
185
- "**Repetition Penalty:** Penalizes repeated tokens to reduce repetition in the output."
186
- )
187
- repetition_penalty_slider.change(
188
- fn=lambda penalty: setattr(
189
- self.model, "repeat_penalty", penalty
190
- ),
191
- inputs=repetition_penalty_slider,
192
- )
193
-
194
- max_tokens_slider = gr.Slider(
195
- minimum=512, maximum=2048, value=self.model.max_tokens, label="Max Tokens"
196
- )
197
- gr.Markdown(
198
- "**Max Tokens:** Sets the maximum number of tokens the model can generate in one response."
199
- )
200
- max_tokens_slider.change(
201
- fn=lambda max_tokens: setattr(
202
- self.model, "max_tokens", max_tokens
203
- ),
204
- inputs=max_tokens_slider,
205
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
206
 
207
  demo.launch()
208
 
 
102
 
103
  with gr.Blocks() as demo:
104
  with gr.Row():
105
+ with gr.Accordion(
106
+ "Basic Settings ⚙️", open=False
107
+ ): # Make the sidebar foldable
108
+ with gr.Column(scale=3): # Sidebar column
109
+ gr.Markdown(
110
+ "## Google Gemma Models: lightweight, state-of-the-art open models from Google"
111
+ )
112
+ task_dropdown = gr.Dropdown(
113
+ choices=self.task_options,
114
+ value=self.current_task_name,
115
+ label="Select Task",
116
+ )
117
+ model_dropdown = gr.Dropdown(
118
+ choices=self.model_options,
119
+ value=self.current_model_name,
120
+ label="Select Gemma Model",
121
+ )
122
+ chat_interface = gr.ChatInterface(
123
+ chat_fn,
124
+ additional_inputs=[model_dropdown, task_dropdown],
125
+ textbox=gr.Textbox(
126
+ placeholder="Ask me something...", container=False
127
+ ),
128
+ )
129
 
130
  with gr.Column(scale=1):
131
  gr.Markdown(
 
147
  task_dropdown.change(
148
  _update_examples, task_dropdown, examples_list.dataset
149
  )
150
+ with gr.Accordion("Model Configuration ⚙️", open=False):
151
+ temperature_slider = gr.Slider(
152
+ minimum=0.1,
153
+ maximum=2,
154
+ value=self.model.temperature,
155
+ label="Temperature",
156
+ )
157
+ gr.Markdown(
158
+ "**Temperature:** Lower values make the output more deterministic."
159
+ )
160
+ temperature_slider.change(
161
+ fn=lambda temp: setattr(self.model, "temperature", temp),
162
+ inputs=temperature_slider,
163
+ )
164
+
165
+ top_p_slider = gr.Slider(
166
+ minimum=0.1,
167
+ maximum=1.0,
168
+ value=self.model.top_p,
169
+ label="Top P",
170
+ )
171
+ gr.Markdown(
172
+ "**Top P:** Lower values make the output more focused."
173
+ )
174
+ top_p_slider.change(
175
+ fn=lambda top_p: setattr(self.model, "top_p", top_p),
176
+ inputs=top_p_slider,
177
+ )
178
+
179
+ top_k_slider = gr.Slider(
180
+ minimum=1,
181
+ maximum=100,
182
+ value=self.model.top_k,
183
+ label="Top K",
184
+ )
185
+ gr.Markdown(
186
+ "**Top K:** Lower values make the output more focused."
187
+ )
188
+ top_k_slider.change(
189
+ fn=lambda top_k: setattr(self.model, "top_k", top_k),
190
+ inputs=top_k_slider,
191
+ )
192
+
193
+ repetition_penalty_slider = gr.Slider(
194
+ minimum=1.0,
195
+ maximum=2.0,
196
+ value=self.model.repeat_penalty,
197
+ label="Repetition Penalty",
198
+ )
199
+ gr.Markdown(
200
+ "**Repetition Penalty:** Penalizes repeated tokens to reduce repetition in the output."
201
+ )
202
+ repetition_penalty_slider.change(
203
+ fn=lambda penalty: setattr(
204
+ self.model, "repeat_penalty", penalty
205
+ ),
206
+ inputs=repetition_penalty_slider,
207
+ )
208
+
209
+ max_tokens_slider = gr.Slider(
210
+ minimum=512,
211
+ maximum=2048,
212
+ value=self.model.max_tokens,
213
+ label="Max Tokens",
214
+ )
215
+ gr.Markdown(
216
+ "**Max Tokens:** Sets the maximum number of tokens the model can generate in one response."
217
+ )
218
+ max_tokens_slider.change(
219
+ fn=lambda max_tokens: setattr(
220
+ self.model, "max_tokens", max_tokens
221
+ ),
222
+ inputs=max_tokens_slider,
223
+ )
224
 
225
  demo.launch()
226