GoidaAlignment committed on
Commit 52ccc5d (verified)
1 Parent(s): c1ba88e

Update app.py
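Replace the hosted `huggingface_hub` InferenceClient with local `transformers` generation: load GoidaAlignment/GOIDA-0.5B via AutoModelForCausalLM, stream tokens through a TextIteratorStreamer running `generate()` on a background thread, and expose temperature, max new tokens, top-p, top-k, repetition penalty, and a model selector in a gr.Blocks ChatInterface.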

Files changed (1)
  1. app.py +133 -46
app.py CHANGED
@@ -1,62 +1,149 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
  """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("GoidaAlignment/GOIDA-0.5B")
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     max_tokens,
-     temperature,
-     top_p,
  ):
-     messages = []
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
  if __name__ == "__main__":
-     demo.launch()
+ import os
+ import time
+ #import spaces
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
  import gradio as gr
+ from threading import Thread
+
+ MODEL_LIST = ["GoidaAlignment/GOIDA-0.5B"]
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
+
+ TITLE = "<h1><center>I SAID GOIDA!</center></h1>"
+
+ PLACEHOLDER = """
+ <center>
+ <p>GOIDA!!</p>
+ </center>
  """
+ # MODEL_LIST holds a single checkpoint, so load it once;
+ # indexing MODEL_LIST[1] or MODEL_LIST[2] would raise IndexError.
+ device = "cpu"  # set to "cuda" for GPU usage
+
+ tokenizer0 = AutoTokenizer.from_pretrained(MODEL_LIST[0])
+ model0 = AutoModelForCausalLM.from_pretrained(MODEL_LIST[0]).to(device)
+
+ #@spaces.GPU()
+ def stream_chat(
+     message: str,
+     history: list,
+     temperature: float = 0.8,
+     max_new_tokens: int = 1024,
+     top_p: float = 1.0,
+     top_k: int = 20,
+     penalty: float = 1.2,
+     choice: str = "GoidaAlignment/GOIDA-0.5B",
  ):
+     print(f'message: {message}')
+     print(f'history: {history}')
+
+     # Rebuild the chat-template conversation from Gradio's (user, assistant) history pairs
+     conversation = []
+     for prompt, answer in history:
+         conversation.extend([
+             {"role": "user", "content": prompt},
+             {"role": "assistant", "content": answer},
+         ])
+
+     conversation.append({"role": "user", "content": message})
+
+     # Only one model is loaded; the `choice` radio currently has a single option
+     model = model0
+     tokenizer = tokenizer0
+
+     input_text = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
+     inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
+     streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
+
+     generate_kwargs = dict(
+         input_ids=inputs,
+         max_new_tokens=max_new_tokens,
+         do_sample=False if temperature == 0 else True,
+         top_p=top_p,
+         top_k=top_k,
+         temperature=temperature,
+         repetition_penalty=penalty,
+         streamer=streamer,
+     )
+
+     # Run generate() on a background thread so tokens can be yielded as they stream in;
+     # generate() already disables gradients, and a no_grad() context opened here would
+     # not carry across the thread boundary anyway.
+     thread = Thread(target=model.generate, kwargs=generate_kwargs)
+     thread.start()
+
+     buffer = ""
+     for new_text in streamer:
+         buffer += new_text
+         yield buffer
+
+ chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)
+
+ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
+     gr.HTML(TITLE)
+     gr.ChatInterface(
+         fn=stream_chat,
+         chatbot=chatbot,
+         fill_height=True,
+         additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+         additional_inputs=[
+             gr.Slider(
+                 minimum=0,
+                 maximum=1,
+                 step=0.1,
+                 value=0.4,
+                 label="Temperature",
+                 render=False,
+             ),
+             gr.Slider(
+                 minimum=128,
+                 maximum=8192,
+                 step=1,
+                 value=1024,
+                 label="Max new tokens",
+                 render=False,
+             ),
+             gr.Slider(
+                 minimum=0.0,
+                 maximum=1.0,
+                 step=0.1,
+                 value=1.0,
+                 label="top_p",
+                 render=False,
+             ),
+             gr.Slider(
+                 minimum=1,
+                 maximum=20,
+                 step=1,
+                 value=20,
+                 label="top_k",
+                 render=False,
+             ),
+             gr.Slider(
+                 minimum=0.0,
+                 maximum=2.0,
+                 step=0.1,
+                 value=1.2,
+                 label="Repetition penalty",
+                 render=False,
+             ),
+             gr.Radio(
+                 ["GoidaAlignment/GOIDA-0.5B"],
+                 value="GoidaAlignment/GOIDA-0.5B",  # must match one of the choices; "494M" selected nothing
+                 label="Load Model",
+                 render=False,
+             ),
+         ],
+         cache_examples=False,
+     )
+
+
  if __name__ == "__main__":
+     demo.launch()