burhaanmughal committed on
Commit
9abcb7a
·
verified ·
1 Parent(s): 2002f41

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -45
app.py CHANGED
@@ -1,67 +1,45 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
 
 
 
4
 
5
def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Generate one chat reply via the public HF Inference API (no login required).

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[dict[str, str]]
        Prior turns as ``{"role": ..., "content": ...}`` dicts
        (Gradio ``type="messages"`` format).
    system_message : str
        System prompt prepended to the flattened conversation.
    max_tokens, temperature, top_p
        Sampling parameters forwarded to ``text_generation``.

    Yields
    ------
    str
        The generated assistant reply, or a fallback message on any failure.
    """
    client = InferenceClient(model="google/gemma-2b-it")

    # Flatten the chat into a plain text prompt: chat_completion is not
    # supported for this model, so text_generation is used instead.
    # (The previous revision also built an unused `messages` list here;
    # that dead code has been removed.)
    lines = [system_message]
    for msg in history:
        lines.append(f"{msg['role'].capitalize()}: {msg['content']}")
    prompt = "\n".join(lines) + f"\nUser: {message}\nAssistant:"

    try:
        # Only the network call sits in the try block; a failure here
        # (cold model, rate limit, timeout) yields a friendly message.
        response = client.text_generation(
            prompt=prompt,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        yield response
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed.
        yield "Server busy. Please try again."
41
-
42
 
 
43
# Extra controls shown under the chat box; their values are passed
# positionally to respond() after (message, history).
_controls = [
    gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(
        minimum=0.1,
        maximum=1.0,
        value=0.95,
        step=0.05,
        label="Top-p (nucleus sampling)",
    ),
]

# Chat UI using the OpenAI-style "messages" history format.
chatbot = gr.ChatInterface(respond, type="messages", additional_inputs=_controls)
59
 
60
# Public app: no login button, just the chat interface.
with gr.Blocks() as demo:
    chatbot.render()

if __name__ == "__main__":
    demo.queue()  # optional — smooths handling of concurrent users
    demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import time
4
 
5
# Shared Inference API client for a public model (fast + free),
# so the Space works without any authentication.
client = InferenceClient(
    model="google/gemma-2b-it",
)

# Default persona prepended to every prompt built by respond().
SYSTEM_PROMPT = "You are a friendly AI assistant."
10
def respond(message, history):
    """Generate one reply for Gradio's ChatInterface.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns as supplied by ChatInterface.

    Yields
    ------
    str
        The model's reply, or a fallback notice if the request fails.
    """
    # Flatten the conversation into a plain text prompt for text_generation.
    prompt = SYSTEM_PROMPT + "\n"
    for u, a in history:
        prompt += f"User: {u}\nAssistant: {a}\n"
    prompt += f"User: {message}\nAssistant:"

    try:
        response = client.text_generation(
            prompt=prompt,
            max_new_tokens=150,
            temperature=0.7,
            top_p=0.9,
        )
        yield response
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate. Typical failure here: the model is cold-starting
        # or the endpoint is rate-limited.
        yield "AI is waking up… Please wait a few seconds"
32
 
33
# Chat UI wired to the respond() generator; title/description are the
# header strings shown above the chat box.
chatbot = gr.ChatInterface(
    respond, title="AI Assistant", description="Public AI chatbot (free)"
)
39
 
40
  with gr.Blocks() as demo:
 
41
  chatbot.render()
42
 
 
43
  if __name__ == "__main__":
44
+ demo.queue() # handles multiple users
45
  demo.launch()