hadadrjt committed on
Commit
bb118d3
·
1 Parent(s): 6197f10

LFM2.5-1.2B: Unlock the context length limit.

Browse files
Files changed (1) hide show
  1. app.py +0 -12
app.py CHANGED
@@ -10,7 +10,6 @@ import gradio as gr
10
  async def playground(
11
  message,
12
  history,
13
- num_ctx,
14
  temperature,
15
  repeat_penalty,
16
  top_k,
@@ -41,7 +40,6 @@ async def playground(
41
  model="hf.co/LiquidAI/LFM2.5-1.2B-Instruct-GGUF:Q4_K_M",
42
  messages=messages,
43
  options={
44
- "num_ctx": int(num_ctx),
45
  "temperature": float(temperature),
46
  "repeat_penalty": float(repeat_penalty),
47
  "top_k": int(top_k),
@@ -91,15 +89,6 @@ with gr.Blocks(
91
  )
92
  gr.Markdown("---")
93
  gr.Markdown("## Model Parameters")
94
- num_ctx = gr.Slider(
95
- minimum=512,
96
- maximum=1024,
97
- value=512,
98
- step=128,
99
- label="Context Length",
100
- info="Maximum context window size (limited to CPU usage)"
101
- )
102
- gr.Markdown("")
103
  temperature = gr.Slider(
104
  minimum=0.1,
105
  maximum=1.0,
@@ -139,7 +128,6 @@ with gr.Blocks(
139
  gr.ChatInterface(
140
  fn=playground,
141
  additional_inputs=[
142
- num_ctx,
143
  temperature,
144
  repeat_penalty,
145
  top_k,
 
10
  async def playground(
11
  message,
12
  history,
 
13
  temperature,
14
  repeat_penalty,
15
  top_k,
 
40
  model="hf.co/LiquidAI/LFM2.5-1.2B-Instruct-GGUF:Q4_K_M",
41
  messages=messages,
42
  options={
 
43
  "temperature": float(temperature),
44
  "repeat_penalty": float(repeat_penalty),
45
  "top_k": int(top_k),
 
89
  )
90
  gr.Markdown("---")
91
  gr.Markdown("## Model Parameters")
 
 
 
 
 
 
 
 
 
92
  temperature = gr.Slider(
93
  minimum=0.1,
94
  maximum=1.0,
 
128
  gr.ChatInterface(
129
  fn=playground,
130
  additional_inputs=[
 
131
  temperature,
132
  repeat_penalty,
133
  top_k,