Seshathri committed
Commit 4594d95 · verified · 1 Parent(s): c58a49d

Update app.py

Files changed (1)
  1. app.py +114 -62
app.py CHANGED
@@ -1,70 +1,122 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-
-def respond(
-    message,
-    history: list[dict[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    hf_token: gr.OAuthToken,
-):
-    """
-    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-    """
-    client = InferenceClient(token=hf_token.token, model="meta-llama/Llama-2-7b-chat-hf")
-
-    messages = [{"role": "system", "content": system_message}]
-
-    messages.extend(history)
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        choices = message.choices
-        token = ""
-        if len(choices) and choices[0].delta.content:
-            token = choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-chatbot = gr.ChatInterface(
-    respond,
-    type="messages",
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-with gr.Blocks() as demo:
-    with gr.Sidebar():
-        gr.LoginButton()
-    chatbot.render()
-
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+class VibeThinkerChat:
+    def __init__(self, model_path="WeiboAI/VibeThinker-1.5B"):
+        print("Loading model and tokenizer...")
+        self.model = AutoModelForCausalLM.from_pretrained(
+            model_path,
+            torch_dtype=torch.bfloat16,
+            trust_remote_code=True,
+            device_map="auto"
+        )
+        self.tokenizer = AutoTokenizer.from_pretrained(
+            model_path,
+            trust_remote_code=True
+        )
+        print("Model loaded successfully!")
+
+    def generate_response(self, prompt, temperature=0.6, max_tokens=40960, top_p=0.95):
+        messages = [
+            {"role": "user", "content": prompt}
+        ]
+
+        text = self.tokenizer.apply_chat_template(
+            messages,
+            tokenize=False,
+            add_generation_prompt=True
+        )
+
+        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)
+
+        generation_config = dict(
+            max_new_tokens=max_tokens,
+            do_sample=True,
+            temperature=temperature,
+            top_p=top_p,
+            top_k=-1
+        )
+
+        generated_ids = self.model.generate(
+            model_inputs.input_ids,
+            **generation_config
+        )
+
+        generated_ids = [
+            output_ids[len(input_ids):]
+            for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+        ]
+
+        response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+        return response
+
+# Initialize model
+chat_model = VibeThinkerChat()
+
+def chat_interface(message, history, temperature, max_tokens):
+    try:
+        response = chat_model.generate_response(
+            message,
+            temperature=temperature,
+            max_tokens=max_tokens
+        )
+        return response
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+# Create Gradio interface
+with gr.Blocks(title="VibeThinker-1.5B Chat") as demo:
+    gr.Markdown("# 🧠 VibeThinker-1.5B Chat Interface")
+    gr.Markdown("A 1.5B parameter reasoning model optimized for math and coding problems.")
+
+    with gr.Row():
+        with gr.Column(scale=3):
+            chatbot = gr.Chatbot(height=500)
+            msg = gr.Textbox(
+                label="Your Message",
+                placeholder="Ask a math or coding question...",
+                lines=3
+            )
+            with gr.Row():
+                submit = gr.Button("Submit", variant="primary")
+                clear = gr.Button("Clear")
+
+        with gr.Column(scale=1):
+            temperature = gr.Slider(
+                minimum=0.1,
+                maximum=2.0,
+                value=0.6,
+                step=0.1,
+                label="Temperature",
+                info="Recommended: 0.6 or 1.0"
+            )
+            max_tokens = gr.Slider(
+                minimum=512,
+                maximum=40960,
+                value=4096,
+                step=512,
+                label="Max Tokens",
+                info="Maximum response length"
+            )
+
+    def user_message(user_msg, history):
+        return "", history + [[user_msg, None]]
+
+    def bot_response(history, temp, max_tok):
+        user_msg = history[-1][0]
+        bot_msg = chat_interface(user_msg, history, temp, max_tok)
+        history[-1][1] = bot_msg
+        return history
+
+    msg.submit(user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot_response, [chatbot, temperature, max_tokens], chatbot
+    )
+    submit.click(user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot_response, [chatbot, temperature, max_tokens], chatbot
+    )
+    clear.click(lambda: None, None, chatbot, queue=False)
 
 if __name__ == "__main__":
+    demo.queue()
     demo.launch()
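
For reviewers who want to smoke-test the new inference path outside the Space, the sketch below mirrors VibeThinkerChat.generate_response as added in this commit. The model ID and sampling defaults are taken from the diff; the prompt and the smaller max_new_tokens are illustrative, and top_k is omitted (see the note after the sketch).

# Editor's sketch, not part of the commit: a standalone smoke test of the
# commit's generate path, assuming torch, transformers, and a GPU are available.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "WeiboAI/VibeThinker-1.5B"  # model ID from the diff
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# Build a single-turn chat prompt exactly as generate_response does.
messages = [{"role": "user", "content": "What is 17 * 24?"}]  # illustrative prompt
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer([text], return_tensors="pt").to(model.device)

output_ids = model.generate(
    inputs.input_ids,
    max_new_tokens=512,   # illustrative; the app's slider defaults to 4096
    do_sample=True,
    temperature=0.6,      # the commit's recommended default
    top_p=0.95,
)

# Strip the prompt tokens before decoding, as the app does.
response = tokenizer.decode(output_ids[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
print(response)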
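
Two review notes on the added code. First, generation_config passes top_k=-1, which is vLLM's convention for "disable top-k"; transformers' sampler may reject a negative value (passing top_k=0, or simply omitting the key, is the safe way to disable it there), which is why the sketch above leaves it out. Second, bot_response returns only the completed string, so this commit trades away the token-by-token streaming that the removed InferenceClient version provided via stream=True.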