GilbertAkham committed on
Commit
f36d844
·
verified ·
1 Parent(s): cf02679

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -21
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
  # -------------------------------------------------
5
- # Inference function
6
  # -------------------------------------------------
7
  def respond(
8
  message,
@@ -14,34 +14,29 @@ def respond(
14
  hf_token: gr.OAuthToken,
15
  ):
16
  """
17
- Chat interface for the Gilbert Multitask Reasoning Model.
18
- Uses the Hugging Face Inference API for GilbertAkham/deepseek-R1-multitask-lora.
19
  """
20
  client = InferenceClient(
21
  token=hf_token.token,
22
  model="GilbertAkham/deepseek-R1-multitask-lora"
23
  )
24
 
25
- # Build prompt history as structured messages
26
- messages = [{"role": "system", "content": system_message}]
27
- messages.extend(history)
28
- messages.append({"role": "user", "content": message})
29
 
30
- response = ""
31
 
32
- # Stream responses from model
33
- for message in client.chat_completion(
34
- messages=messages,
35
- max_tokens=max_tokens,
36
  stream=True,
37
  temperature=temperature,
38
  top_p=top_p,
39
  ):
40
- choices = message.choices
41
- token = ""
42
- if len(choices) and choices[0].delta and choices[0].delta.content:
43
- token = choices[0].delta.content
44
-
45
  response += token
46
  yield response
47
 
@@ -55,9 +50,8 @@ chatbot = gr.ChatInterface(
55
  additional_inputs=[
56
  gr.Textbox(
57
  value=(
58
- "You are Reasoning-Bot, an intelligent multitask reasoning model capable of "
59
- "drafting emails, summarizing text, continuing stories, solving reasoning "
60
- "questions, and engaging in helpful conversations."
61
  ),
62
  label="🧠 System Message"
63
  ),
@@ -79,7 +73,7 @@ with gr.Blocks(title="Gilbert Multitask Reasoning AI") as demo:
79
  ### 💡 About
80
  - Model: **GilbertAkham/deepseek-R1-multitask-lora**
81
  - Base: DeepSeek-R1-Distill-Qwen-1.5B
82
- - Trained for email, reasoning, chat, and summarization tasks.
83
  """
84
  )
85
  chatbot.render()
 
2
  from huggingface_hub import InferenceClient
3
 
4
  # -------------------------------------------------
5
+ # Inference function (using text generation)
6
  # -------------------------------------------------
7
def respond(
    message,
    history,
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token: gr.OAuthToken,
):
    """
    Stream a reply from the Gilbert Multitask Reasoning Model.

    Uses InferenceClient.text_generation() — this model does not support
    chat_completion() — so the conversation is flattened into a plain-text
    prompt: system message, the labeled prior turns, then the new user turn.

    Args:
        message: The new user message.
        history: Prior turns as messages-format dicts with "role"/"content"
            keys (as supplied by gr.ChatInterface). TODO confirm shape
            against the ChatInterface config, which is outside this block.
        system_message: System prompt placed at the top of the prompt text.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature forwarded to the API.
        top_p: Nucleus-sampling cutoff forwarded to the API.
        hf_token: OAuth token injected by Gradio; used to authenticate
            against the Hugging Face Inference API.

    Yields:
        The accumulated response text after each streamed token, so the
        chat UI renders the reply incrementally.
    """
    client = InferenceClient(
        token=hf_token.token,
        model="GilbertAkham/deepseek-R1-multitask-lora",
    )

    # Build the conversation prompt manually. Each prior turn is labeled
    # with its OWN role ("User:" / "Assistant:"). The previous code labeled
    # every turn "User:" and inserted the raw role string as a content line,
    # which garbled the context the model saw.
    history_text = "".join(
        f"{turn['role'].capitalize()}: {turn['content']}\n" for turn in history
    )

    prompt = f"{system_message}\n\n{history_text}\nUser: {message}\nAssistant:"

    response = ""
    # Stream tokens and yield the running text so the UI updates live.
    for token in client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += token
        yield response
42
 
 
50
  additional_inputs=[
51
  gr.Textbox(
52
  value=(
53
+ "You are Reasoning-Bot, a multitask reasoning AI trained for "
54
+ "chatting, summarization, creative writing, reasoning, and formal replies."
 
55
  ),
56
  label="🧠 System Message"
57
  ),
 
73
  ### 💡 About
74
  - Model: **GilbertAkham/deepseek-R1-multitask-lora**
75
  - Base: DeepSeek-R1-Distill-Qwen-1.5B
76
+ - Tasks: Chat, email, reasoning, story continuation, summarization.
77
  """
78
  )
79
  chatbot.render()