aadya1762 committed on
Commit
bc54c1b
·
1 Parent(s): 94b5c59

bug fixes

Browse files
Files changed (1) hide show
  1. gemmademo/_chat.py +10 -9
gemmademo/_chat.py CHANGED
@@ -145,11 +145,12 @@ class GradioChat:
145
  task_dropdown.change(
146
  _update_examples, task_dropdown, examples_list.dataset
147
  )
 
148
  temperature_slider = gr.Slider(
149
- minimum=0.1, maximum=2, value=1.0, label="Temperature"
150
  )
151
  gr.Markdown(
152
- "**Temperature:** Controls the randomness of the model's output. Lower values make the output more deterministic."
153
  )
154
  temperature_slider.change(
155
  fn=lambda temp: setattr(self.model, "temperature", temp),
@@ -157,10 +158,10 @@ class GradioChat:
157
  )
158
 
159
  top_p_slider = gr.Slider(
160
- minimum=0.1, maximum=1.0, value=0.9, label="Top P"
161
  )
162
  gr.Markdown(
163
- "**Top P:** Limits the sampling to a subset of the most probable tokens. Lower values make the output more focused."
164
  )
165
  top_p_slider.change(
166
  fn=lambda top_p: setattr(self.model, "top_p", top_p),
@@ -168,10 +169,10 @@ class GradioChat:
168
  )
169
 
170
  top_k_slider = gr.Slider(
171
- minimum=1, maximum=100, value=50, label="Top K"
172
  )
173
  gr.Markdown(
174
- "**Top K:** Limits the sampling to the top K most probable tokens. Lower values make the output more focused."
175
  )
176
  top_k_slider.change(
177
  fn=lambda top_k: setattr(self.model, "top_k", top_k),
@@ -179,7 +180,7 @@ class GradioChat:
179
  )
180
 
181
  repetition_penalty_slider = gr.Slider(
182
- minimum=1.0, maximum=2.0, value=1.0, label="Repetition Penalty"
183
  )
184
  gr.Markdown(
185
  "**Repetition Penalty:** Penalizes repeated tokens to reduce repetition in the output."
@@ -192,10 +193,10 @@ class GradioChat:
192
  )
193
 
194
  max_tokens_slider = gr.Slider(
195
- minimum=512, maximum=2048, value=1024, label="Max Tokens"
196
  )
197
  gr.Markdown(
198
- "**Max Tokens:** Sets the maximum number of tokens the model can generate in a single response."
199
  )
200
  max_tokens_slider.change(
201
  fn=lambda max_tokens: setattr(
 
145
  task_dropdown.change(
146
  _update_examples, task_dropdown, examples_list.dataset
147
  )
148
+
149
  temperature_slider = gr.Slider(
150
+ minimum=0.1, maximum=2, value=self.model.temperature, label="Temperature"
151
  )
152
  gr.Markdown(
153
+ "**Temperature:** Lower values make the output more deterministic."
154
  )
155
  temperature_slider.change(
156
  fn=lambda temp: setattr(self.model, "temperature", temp),
 
158
  )
159
 
160
  top_p_slider = gr.Slider(
161
+ minimum=0.1, maximum=1.0, value=self.model.top_p, label="Top P"
162
  )
163
  gr.Markdown(
164
+ "**Top P:** Lower values make the output more focused."
165
  )
166
  top_p_slider.change(
167
  fn=lambda top_p: setattr(self.model, "top_p", top_p),
 
169
  )
170
 
171
  top_k_slider = gr.Slider(
172
+ minimum=1, maximum=100, value=self.model.top_k, label="Top K"
173
  )
174
  gr.Markdown(
175
+ "**Top K:** Lower values make the output more focused."
176
  )
177
  top_k_slider.change(
178
  fn=lambda top_k: setattr(self.model, "top_k", top_k),
 
180
  )
181
 
182
  repetition_penalty_slider = gr.Slider(
183
+ minimum=1.0, maximum=2.0, value=self.model.repitition_penalty, label="Repetition Penalty"
184
  )
185
  gr.Markdown(
186
  "**Repetition Penalty:** Penalizes repeated tokens to reduce repetition in the output."
 
193
  )
194
 
195
  max_tokens_slider = gr.Slider(
196
+ minimum=512, maximum=2048, value=self.model.max_tokens, label="Max Tokens"
197
  )
198
  gr.Markdown(
199
+ "**Max Tokens:** Sets the maximum number of tokens the model can generate in one response."
200
  )
201
  max_tokens_slider.change(
202
  fn=lambda max_tokens: setattr(