esha111 committed on
Commit
7d9686c
·
verified ·
1 Parent(s): c922463

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -28
app.py CHANGED
@@ -1,33 +1,65 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
-
4
- # Load the text generation pipeline
5
- print("Loading model pipeline...")
6
- pipe = pipeline("text-generation", model="esha111/id2223_lab2_correct")
7
- print("Pipeline loaded.")
8
-
9
- # Function to handle user input
10
# Function to handle user input
def generate_response(user_input):
    """Generate a chat reply for *user_input* with the text-generation pipeline.

    Parameters:
        user_input: The user's message as a plain string.

    Returns:
        The assistant's reply text, or an ``"Error: ..."`` string if
        generation fails for any reason.
    """
    try:
        # Prepare the chat-style input the pipeline expects.
        messages = [{"role": "user", "content": user_input}]

        # The pipeline returns a one-element list of result dicts, so
        # indexing response[1] (as the original did) always raised
        # IndexError. For chat input, "generated_text" holds the full
        # message list; the assistant's reply is the last entry.
        response = pipe(messages)
        return response[0]["generated_text"][-1]["content"]
    except Exception as e:
        return f"Error: {e}"
20
-
21
- # Create a Gradio interface
22
# Configuration for a minimal one-text-in / one-text-out UI around the model.
_interface_config = dict(
    fn=generate_response,  # Function to call
    inputs="text",  # Input type: single text field
    outputs="text",  # Output type: single text field
    title="Chat with Model",
    description="Type a message to interact with the model.",
)

# Create a Gradio interface
demo = gr.Interface(**_interface_config)

# Launch the Gradio app when run as a script.
if __name__ == "__main__":
    print("Launching Gradio app...")
    demo.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+
4
+ """
5
+ Cloned from https://huggingface.co/spaces/gradio-templates/chatbot - and added the history toggle to the interface
6
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
+ """
8
+
9
# Hugging Face Inference API client, pointed at the fine-tuned model on the Hub.
client = InferenceClient("esha111/id2223_lab2_correct")
10
+
11
+
12
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    use_history,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, yielding the growing reply.

    Parameters:
        message: The user's new message.
        history: Prior (user, assistant) turn pairs; mutated in place to
            record the completed turn.
        system_message: System prompt prepended to the conversation.
        use_history: "Yes" to include prior turns in the prompt, "No" to
            send only the system prompt and the new message.
        max_tokens / temperature / top_p: Sampling parameters forwarded to
            the inference endpoint.

    Yields:
        The accumulated response text after each streamed token.
    """
    messages = [{"role": "system", "content": system_message}]

    # Only use history if the toggle is on
    if use_history == "Yes":
        for user_turn, bot_turn in history:
            if user_turn:
                messages.append({"role": "user", "content": user_turn})
            if bot_turn:
                messages.append({"role": "assistant", "content": bot_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # BUGFIX: the original reused the name `message` as the loop variable,
    # clobbering the user's message before it was appended to history below.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content

        # The final stream event may carry content=None; the original
        # `response += token` raised TypeError on it. Skip empty deltas.
        if token:
            response += token
        yield response

    # Then add to history (even if the toggle is off)
    history.append((message, response))
50
+
51
+
52
# Extra controls rendered under the chat box, in the positional order that
# `respond` expects its additional arguments.
_extra_controls = [
    gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
    gr.Radio(["Yes", "No"], value="Yes", label="Use chat history"),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(
        minimum=0.1,
        maximum=1.0,
        value=0.95,
        step=0.05,
        label="Top-p (nucleus sampling)",
    ),
]

# Chat UI wired to the streaming `respond` generator.
demo = gr.ChatInterface(respond, additional_inputs=_extra_controls)


if __name__ == "__main__":
    demo.launch()