hashirehtisham committed on
Commit
4a4848d
·
verified ·
1 Parent(s): 26ac651

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -20
app.py CHANGED
@@ -11,42 +11,51 @@ def respond(
11
  top_p,
12
  hf_token: gr.OAuthToken,
13
  ):
 
 
 
 
 
14
 
15
- client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
16
-
17
  messages = [{"role": "system", "content": system_message}]
18
-
19
  messages.extend(history)
20
-
21
  messages.append({"role": "user", "content": message})
22
 
 
 
 
 
 
 
 
 
 
 
 
23
  response = ""
24
 
25
- for message in client.chat_completion(
26
- messages,
27
- max_tokens=max_tokens,
 
28
  stream=True,
29
  temperature=temperature,
30
  top_p=top_p,
31
  ):
32
- choices = message.choices
33
- token = ""
34
- if len(choices) and choices[0].delta.content:
35
- token = choices[0].delta.content
36
-
37
- response += token
38
  yield response
39
 
40
 
41
- """
42
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
43
- """
44
  chatbot = gr.ChatInterface(
45
- respond,
46
  type="messages",
47
  additional_inputs=[
48
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
49
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
 
 
 
50
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
51
  gr.Slider(
52
  minimum=0.1,
@@ -63,6 +72,5 @@ with gr.Blocks() as demo:
63
  gr.LoginButton()
64
  chatbot.render()
65
 
66
-
67
  if __name__ == "__main__":
68
  demo.launch()
 
11
  top_p,
12
  hf_token: gr.OAuthToken,
13
  ):
14
+ # Initialize the Hugging Face inference client for Gemma
15
+ client = InferenceClient(
16
+ model="google/gemma-2b-it",
17
+ token=hf_token.token,
18
+ )
19
 
20
+ # Prepare the full chat history
 
21
  messages = [{"role": "system", "content": system_message}]
 
22
  messages.extend(history)
 
23
  messages.append({"role": "user", "content": message})
24
 
25
+ # Build the text prompt manually (Gemma expects plain text, not OpenAI chat schema)
26
+ prompt = ""
27
+ for msg in messages:
28
+ if msg["role"] == "system":
29
+ prompt += f"System: {msg['content']}\n"
30
+ elif msg["role"] == "user":
31
+ prompt += f"User: {msg['content']}\n"
32
+ elif msg["role"] == "assistant":
33
+ prompt += f"Assistant: {msg['content']}\n"
34
+ prompt += "Assistant:"
35
+
36
  response = ""
37
 
38
+ # Stream the model output token by token
39
+ for token in client.text_generation(
40
+ prompt,
41
+ max_new_tokens=max_tokens,
42
  stream=True,
43
  temperature=temperature,
44
  top_p=top_p,
45
  ):
46
+ response += token.token
 
 
 
 
 
47
  yield response
48
 
49
 
 
 
 
50
  chatbot = gr.ChatInterface(
51
+ fn=respond,
52
  type="messages",
53
  additional_inputs=[
54
+ gr.Textbox(
55
+ value="You are a friendly Pregnancy 1st month guidance chatbot named 'PREGNITECH' developed by team Helix AI which consists of 3 members: Hashir Ehtisham, Lameea Khan, and Kainat Ali.",
56
+ label="System message",
57
+ ),
58
+ gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max new tokens"),
59
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
60
  gr.Slider(
61
  minimum=0.1,
 
72
  gr.LoginButton()
73
  chatbot.render()
74
 
 
75
  if __name__ == "__main__":
76
  demo.launch()