CryptoCreeper committed on
Commit
c551a45
·
verified ·
1 Parent(s): d048b7a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -20
app.py CHANGED
@@ -28,7 +28,7 @@ def chat_with_model(message, history, model_choice):
28
  tokenizer, model = load_model(model_choice)
29
  device = model.device
30
 
31
- prompt = f"User: {message}\nAssistant:"
32
 
33
  streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
34
  inputs = tokenizer(prompt, return_tensors="pt").to(device)
@@ -36,12 +36,10 @@ def chat_with_model(message, history, model_choice):
36
  generation_kwargs = dict(
37
  **inputs,
38
  streamer=streamer,
39
- max_new_tokens=512,
40
  temperature=0.7,
41
  top_p=0.9,
42
  do_sample=True,
43
- stop_strings=["User:", "Assistant:"],
44
- tokenizer=tokenizer
45
  )
46
 
47
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
@@ -49,8 +47,6 @@ def chat_with_model(message, history, model_choice):
49
 
50
  partial_text = ""
51
  for new_text in streamer:
52
- if "User:" in new_text:
53
- break
54
  partial_text += new_text
55
  yield [
56
  {"role": "user", "content": message},
@@ -73,32 +69,28 @@ def create_demo():
73
 
74
  with gr.Blocks(theme=custom_theme, title="Creeper AI Chatbot") as demo:
75
  gr.Markdown("# 🌿 Creeper AI Chatbot")
76
- gr.Markdown("*Low-latency Liquid Intelligence*")
77
 
78
- with gr.Row():
79
- model_choice = gr.Dropdown(
80
- label="Model Core",
81
- choices=list(MODEL_NAMES.keys()),
82
- value="LFM 1.2B",
83
- interactive=True
84
- )
85
 
86
  chatbot = gr.Chatbot(
87
- label="Neural Interface",
88
  type="messages",
89
- height=500,
90
- show_copy_button=True
91
  )
92
 
93
  with gr.Row():
94
  msg = gr.Textbox(
95
- label="Input Command",
96
- placeholder="Message Creeper AI...",
97
  scale=4
98
  )
99
  submit_btn = gr.Button("Send", variant="primary", scale=1)
100
 
101
- clear = gr.Button("Reset Memory (Wipe)")
102
 
103
  def add_user_message(user_message):
104
  return "", [{"role": "user", "content": user_message}]
 
28
  tokenizer, model = load_model(model_choice)
29
  device = model.device
30
 
31
+ prompt = message
32
 
33
  streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
34
  inputs = tokenizer(prompt, return_tensors="pt").to(device)
 
36
  generation_kwargs = dict(
37
  **inputs,
38
  streamer=streamer,
39
+ max_new_tokens=1024,
40
  temperature=0.7,
41
  top_p=0.9,
42
  do_sample=True,
 
 
43
  )
44
 
45
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
 
47
 
48
  partial_text = ""
49
  for new_text in streamer:
 
 
50
  partial_text += new_text
51
  yield [
52
  {"role": "user", "content": message},
 
69
 
70
  with gr.Blocks(theme=custom_theme, title="Creeper AI Chatbot") as demo:
71
  gr.Markdown("# 🌿 Creeper AI Chatbot")
 
72
 
73
+ model_choice = gr.Dropdown(
74
+ label="Select Model",
75
+ choices=list(MODEL_NAMES.keys()),
76
+ value="LFM 1.2B"
77
+ )
 
 
78
 
79
  chatbot = gr.Chatbot(
80
+ label="Raw AI Output",
81
  type="messages",
82
+ height=500
 
83
  )
84
 
85
  with gr.Row():
86
  msg = gr.Textbox(
87
+ label="Direct Input",
88
+ placeholder="Type exactly what you want to send...",
89
  scale=4
90
  )
91
  submit_btn = gr.Button("Send", variant="primary", scale=1)
92
 
93
+ clear = gr.Button("Clear")
94
 
95
  def add_user_message(user_message):
96
  return "", [{"role": "user", "content": user_message}]