kdevoe committed
Commit 00baf90 · verified · 1 Parent(s): 5c9550b

Removing system message and loading FlanT5 base

Files changed (1): app.py +27 -38
app.py CHANGED
@@ -1,64 +1,53 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("google/flan-t5-xxl")
+# Load Flan-T5-base model and tokenizer from Hugging Face
+model_name = "google/flan-t5-base"
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 
 def respond(
     message,
     history: list[tuple[str, str]],
-    system_message,
     max_tokens,
     temperature,
     top_p,
 ):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
+    # Prepare the input by concatenating the history into a dialogue format
+    input_text = ""
+    for user_msg, bot_msg in history:
+        input_text += f"User: {user_msg} Assistant: {bot_msg} "
+    input_text += f"User: {message}"
+
+    # Tokenize the input text
+    inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
+
+    # Generate the response using Flan-T5-base
+    output_tokens = model.generate(
+        inputs["input_ids"],
+        max_length=max_tokens,
         temperature=temperature,
         top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
+        do_sample=True,
+    )
 
-        response += token
-        yield response
+    # Decode and return the assistant's response
+    response = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
+    yield response
 
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+# Define the Gradio interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
     ],
 )
 
-
 if __name__ == "__main__":
     demo.launch()
+
+
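Two side notes on the new version (editor sketches under stated assumptions, not part of the commit):

First, the slider is labeled "Max new tokens", but its value is passed to model.generate() as max_length, which caps the total length of the decoded sequence. transformers also accepts max_new_tokens, which matches the label more literally; a minimal sketch of that variant of the call:

    output_tokens = model.generate(
        inputs["input_ids"],
        max_new_tokens=max_tokens,  # counts only newly generated tokens, matching the slider label
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
    )

Second, because the hosted InferenceClient is replaced by a locally loaded model, the handler can be smoke-tested without launching the Gradio UI. A minimal sketch, assuming the new app.py above is importable from the working directory (the test prompt and history are made up for illustration):

    # smoke_test.py - quick local check of the respond() generator
    from app import respond  # importing app.py loads the model but does not launch the UI

    history = [("Hi there", "Hello! How can I help?")]
    for partial in respond(
        "Translate to German: The house is wonderful.",
        history,
        max_tokens=64,
        temperature=0.7,
        top_p=0.95,
    ):
        print(partial)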