Update app.py
app.py CHANGED
@@ -75,12 +75,12 @@ def respond(
     history: list[tuple[str, str]],
     preset,
     temperature,
-    mirostat_tau,
+    lp_start,
+    lp_decay,
     mirostat_eta,
+    mirostat_tau,
     frequency_penalty,
     presence_penalty,
-    lp_start,
-    lp_decay,
     max_tokens
 ):
 
@@ -104,7 +104,7 @@ def respond(
         mirostat_mode=1,
         mirostat_tau=mirostat_tau,
         mirostat_eta=mirostat_eta,
-        max_tokens=
+        max_tokens=max_tokens,
         frequency_penalty=frequency_penalty,
         presence_penalty=presence_penalty,
         logits_processor=lambda ids, logits: custom_lp_logits_processor(ids, logits, lp_start, lp_decay, len(convo))
@@ -129,14 +129,14 @@ demo = gr.ChatInterface(
     title="EliGPT v1.3",
     additional_inputs=[
         gr.Radio(presets.keys(), label="Preset", info="Gaslight the model into acting a certain way - WARNING, IF YOU CHANGE THIS WHILE THERE ARE MESSAGES IN THE CHAT, THE MODEL WILL BECOME VERY SLOW FOR YOU", value="none"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.
-        gr.Slider(minimum=0.0, maximum=10.0, value=3.0, step=0.5, label="Mirostat tau", info="Basically, how many drugs should the model be on?"),
-        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Mirostat eta", info="I don't even know man"),
-        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='"Don\'repeat yourself"'),
-        gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.8, step=0.1, label="Temperature", info="How chaotic should the model be?"),
         gr.Slider(minimum=0, maximum=512, value=32, step=1, label="Length penalty start", info='When should the model start being more likely to shut up?'),
-        gr.Slider(minimum=0.5, maximum=1.5, value=1.02, step=0.01, label="Length penalty decay factor", info='How fast should
-        gr.Slider(minimum=
+        gr.Slider(minimum=0.5, maximum=1.5, value=1.02, step=0.01, label="Length penalty decay factor", info='How fast should that stop likelihood increase?'),
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Mirostat eta", info="How grammatical the model is or something"),
+        gr.Slider(minimum=0.0, maximum=10.0, value=3.0, step=0.5, label="Mirostat tau", info="Lower number keeps hallucinations to a minimum"),
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Frequency penalty", info='"Don\'repeat yourself"'),
+        gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.01, label="Presence penalty", info='"Use lots of diverse words"'),
+        gr.Slider(minimum=1, maximum=1024, value=1024, step=1, label="Max new tokens", info="How many words can the model generate at most?"),
     ],
 )
 
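The reordering above is load-bearing: gr.ChatInterface passes additional_inputs to the callback positionally, after the message and history, so the slider order in the third hunk and the parameter order in respond() have to match one for one (Preset, Temperature, Length penalty start, Length penalty decay, Mirostat eta, Mirostat tau, Frequency penalty, Presence penalty, Max new tokens). The lp_start and lp_decay values then reach the custom_lp_logits_processor lambda in the second hunk. That function's definition is not part of this diff; the sketch below is only a guess at what a length-penalty processor with that call shape could do, assuming ids holds the prompt plus the tokens generated so far and that EOS_TOKEN_ID is the model's end-of-sequence token (both are assumptions, not code from the commit):

import math

EOS_TOKEN_ID = 2  # placeholder; the real app would take this from its model/tokenizer

def custom_lp_logits_processor(ids, logits, lp_start, lp_decay, prompt_len):
    # Hypothetical reconstruction of a length penalty: once the reply is longer
    # than lp_start tokens, nudge the end-of-sequence logit up a little more for
    # every extra token, making the model increasingly likely to stop.
    generated = len(ids) - prompt_len
    overshoot = generated - lp_start
    if overshoot > 0:
        logits[EOS_TOKEN_ID] += overshoot * math.log(lp_decay)
    return logits

Under this reading, the UI defaults (lp_start=32, lp_decay=1.02) would add roughly 0.02 to the EOS logit for each token past the first 32 of a reply, which lines up with the "start being more likely to shut up" and "decay factor" wording on the sliders.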