FractalAIR committed
Commit 71830ea · verified · 1 Parent(s): 82160cb

Update app.py

Files changed (1):
  app.py +102 -136
app.py CHANGED
@@ -3,177 +3,143 @@ import spaces
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 import torch
 from threading import Thread

-phi4_model_path = "microsoft/Phi-4-reasoning-plus"

 device = "cuda:0" if torch.cuda.is_available() else "cpu"

-phi4_model = AutoModelForCausalLM.from_pretrained(phi4_model_path, device_map="auto", torch_dtype="auto")
-phi4_tokenizer = AutoTokenizer.from_pretrained(phi4_model_path)
-
 @spaces.GPU(duration=60)
-def generate_response(user_message, max_tokens, temperature, top_k, top_p, repetition_penalty, history_state):
     if not user_message.strip():
         return history_state, history_state
-
-    # Phi-4 model settings
-    model = phi4_model
-    tokenizer = phi4_tokenizer
-    start_tag = "<|im_start|>"
-    sep_tag = "<|im_sep|>"
-    end_tag = "<|im_end|>"
-
-    # Recommended prompt settings by Microsoft
-    system_message = "Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution using the specified format: <think> {Thought section} </think> {Solution section}. In the Thought section, detail your reasoning process in steps. Each step should include detailed considerations such as analysing questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. The Solution section should be logical, accurate, and concise and detail necessary steps needed to reach the conclusion. Now, try to solve the following question through the above guidelines:"
-    prompt = f"{start_tag}system{sep_tag}{system_message}{end_tag}"
-    for message in history_state:
-        if message["role"] == "user":
-            prompt += f"{start_tag}user{sep_tag}{message['content']}{end_tag}"
-        elif message["role"] == "assistant" and message["content"]:
-            prompt += f"{start_tag}assistant{sep_tag}{message['content']}{end_tag}"
-    prompt += f"{start_tag}user{sep_tag}{user_message}{end_tag}{start_tag}assistant{sep_tag}"

     inputs = tokenizer(prompt, return_tensors="pt").to(device)
-
-    do_sample = not (temperature == 1.0 and top_k >= 100 and top_p == 1.0)
-
-    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
-
-    # sampling techniques
-    generation_kwargs = {
-        "input_ids": inputs["input_ids"],
-        "attention_mask": inputs["attention_mask"],
-        "max_new_tokens": int(max_tokens),
-        "do_sample": True,
-        "temperature": 0.8,
-        "top_k": int(top_k),
-        "top_p": 0.95,
-        "repetition_penalty": repetition_penalty,
-        "streamer": streamer,
-    }

     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()

-    # Stream the response
     assistant_response = ""
     new_history = history_state + [
         {"role": "user", "content": user_message},
         {"role": "assistant", "content": ""}
     ]
     for new_token in streamer:
-        cleaned_token = new_token.replace("<|im_start|>", "").replace("<|im_sep|>", "").replace("<|im_end|>", "")
-        assistant_response += cleaned_token
         new_history[-1]["content"] = assistant_response.strip()
         yield new_history, new_history

     yield new_history, new_history

 example_messages = {
-    "Math reasoning": "If a rectangular prism has a length of 6 cm, a width of 4 cm, and a height of 5 cm, what is the length of the longest line segment that can be drawn from one vertex to another?",
-    "Logic puzzle": "Four people (Alex, Blake, Casey, and Dana) each have a different favorite color (red, blue, green, yellow) and a different favorite fruit (apple, banana, cherry, date). Given the following clues: 1) The person who likes red doesn't like dates. 2) Alex likes yellow. 3) The person who likes blue likes cherries. 4) Blake doesn't like apples or bananas. 5) Casey doesn't like yellow or green. Who likes what color and what fruit?",
-    "Physics problem": "A ball is thrown upward with an initial velocity of 15 m/s from a height of 2 meters above the ground. Assuming the acceleration due to gravity is 9.8 m/s², determine: 1) The maximum height the ball reaches. 2) The total time the ball is in the air before hitting the ground. 3) The velocity with which the ball hits the ground."
 }

 with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    gr.Markdown(
         """
-        # Phi-4-reasoning-plus Chatbot
-        Welcome to the Phi-4-reasoning-plus Chatbot! This model excels at multi-step reasoning tasks in mathematics, logic, and science.
-
-        The model will provide responses with two sections:
-        1. **<think>**: A detailed step-by-step reasoning process showing its work
-        2. **Solution**: A concise, accurate final answer based on the reasoning
-
-        Try the example problems below to see how the model breaks down complex reasoning problems.
         """
     )
-
-    history_state = gr.State([])

     with gr.Row():
         with gr.Column(scale=1):
             gr.Markdown("### Settings")
-            max_tokens_slider = gr.Slider(
-                minimum=64,
-                maximum=32768,
-                step=1024,
-                value=4096,
-                label="Max Tokens"
             )
-            with gr.Accordion("Advanced Settings", open=False):
-                temperature_slider = gr.Slider(
-                    minimum=0.1,
-                    maximum=2.0,
-                    value=0.8,
-                    label="Temperature"
-                )
-                top_k_slider = gr.Slider(
-                    minimum=1,
-                    maximum=100,
-                    step=1,
-                    value=50,
-                    label="Top-k"
-                )
-                top_p_slider = gr.Slider(
-                    minimum=0.1,
-                    maximum=1.0,
-                    value=0.95,
-                    label="Top-p"
-                )
-                repetition_penalty_slider = gr.Slider(
-                    minimum=1.0,
-                    maximum=2.0,
-                    value=1.0,
-                    label="Repetition Penalty"
-                )
-
         with gr.Column(scale=4):
-            chatbot = gr.Chatbot(label="Chat", type="messages")
             with gr.Row():
-                user_input = gr.Textbox(
-                    label="Your message",
-                    placeholder="Type your message here...",
-                    scale=3
-                )
-                submit_button = gr.Button("Send", variant="primary", scale=1)
-                clear_button = gr.Button("Clear", scale=1)
-            gr.Markdown("**Try these examples:**")
-            with gr.Row():
-                example1_button = gr.Button("Math reasoning")
-                example2_button = gr.Button("Logic puzzle")
-                example3_button = gr.Button("Physics problem")
-
-    submit_button.click(
-        fn=generate_response,
-        inputs=[user_input, max_tokens_slider, temperature_slider, top_k_slider, top_p_slider, repetition_penalty_slider, history_state],
-        outputs=[chatbot, history_state]
-    ).then(
-        fn=lambda: gr.update(value=""),
-        inputs=None,
-        outputs=user_input
-    )
-
-    clear_button.click(
-        fn=lambda: ([], []),
-        inputs=None,
-        outputs=[chatbot, history_state]
-    )
-
-    example1_button.click(
-        fn=lambda: gr.update(value=example_messages["Math reasoning"]),
-        inputs=None,
-        outputs=user_input
-    )
-    example2_button.click(
-        fn=lambda: gr.update(value=example_messages["Logic puzzle"]),
-        inputs=None,
-        outputs=user_input
-    )
-    example3_button.click(
-        fn=lambda: gr.update(value=example_messages["Physics problem"]),
-        inputs=None,
-        outputs=user_input
-    )
-
-demo.launch(ssr_mode=False)
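For comparison: most chat checkpoints on the Hub ship a chat template, so the manual `<|im_start|>`/`<|im_sep|>`/`<|im_end|>` concatenation in the removed code can usually be delegated to the tokenizer. A minimal sketch of that alternative, not what this commit does; it assumes the checkpoint ships a chat template and reuses the variable shapes above:

```python
# Sketch only: build the same prompt via the tokenizer's built-in chat template
# instead of hand-concatenating <|im_start|>/<|im_sep|>/<|im_end|> tags.
messages = [{"role": "system", "content": system_message}]
messages += [m for m in history_state if m.get("content")]
messages.append({"role": "user", "content": user_message})

# add_generation_prompt=True appends the assistant header so the model
# continues in the assistant role; tokenize=False returns a plain string.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
```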
 
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 import torch
 from threading import Thread
+import re
+import uuid

+# ------------- MODEL LOADING -------------
+model_name = "FractalAIResearch/Fathom-R1-14B"  # CHANGE this if your org name or model is different

 device = "cuda:0" if torch.cuda.is_available() else "cpu"

+model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto")
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+# -------- MATH FORMATTING UTILITIES --------
+def format_math(text):
+    text = re.sub(r"\[(.*?)\]", r"$$\1$$", text, flags=re.DOTALL)
+    text = text.replace(r"\(", "$").replace(r"\)", "$")
+    return text
+
+def generate_conversation_id():
+    return str(uuid.uuid4())[:8]
+
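A quick illustration of what the `format_math` helper above does: it rewrites bracket-delimited display math to `$$...$$` and `\(...\)` inline math to `$...$`, the delimiters Gradio's Markdown renderer understands. The input string here is invented for the demo:

```python
import re

def format_math(text):  # copied from the diff above so the demo is self-contained
    text = re.sub(r"\[(.*?)\]", r"$$\1$$", text, flags=re.DOTALL)
    text = text.replace(r"\(", "$").replace(r"\)", "$")
    return text

raw = r"The roots are \(x_{1,2}\): [x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}]"
print(format_math(raw))
# The roots are $x_{1,2}$: $$x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}$$
```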
+# --------- PROMPT CONSTRUCTION ---------
+def construct_prompt(history_state, user_message, system_message):
+    # Adjust custom tags as per your Fathom R1 model prompt format. This is a typical chat prompt pattern.
+    prompt = f"[SYSTEM]\n{system_message.strip()}\n"
+    for m in history_state:
+        if m["role"] == "user":
+            prompt += f"[USER]\n{m['content']}\n"
+        elif m["role"] == "assistant":
+            prompt += f"[ASSISTANT]\n{m['content']}\n"
+    prompt += f"[USER]\n{user_message.strip()}\n[ASSISTANT]\n"
+    return prompt
+
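To make the prompt shape concrete, here is roughly what `construct_prompt` produces for a short history. The turn contents are invented for the example, and the definitions above are assumed to be in scope:

```python
# Hypothetical call showing the flat tagged transcript the model receives.
history = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "4"},
]
print(construct_prompt(history, "And 3 + 3?", system_message))
# [SYSTEM]
# Your role as an assistant involves ... (system prompt text)
# [USER]
# What is 2 + 2?
# [ASSISTANT]
# 4
# [USER]
# And 3 + 3?
# [ASSISTANT]
```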
+# --------- SYSTEM PROMPT SETUP ---------
+system_message = (
+    "Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. "
+    "Please structure your response into two main sections: Thought and Solution using the specified format: <think> ... </think> ... {Solution section}. "
+    "In the Thought section, detail your reasoning in steps. Then, systematically present the final solution that is logical and concise."
+)
+
+# ----------- GENERATION FUNCTION -----------
 @spaces.GPU(duration=60)
+def generate_response(
+    user_message, max_tokens, temperature, top_p, history_state
+):
     if not user_message.strip():
         return history_state, history_state

+    prompt = construct_prompt(history_state, user_message, system_message)
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+
+    generation_kwargs = dict(
+        input_ids=inputs["input_ids"],
+        attention_mask=inputs["attention_mask"],
+        max_new_tokens=int(max_tokens),
+        do_sample=True,
+        temperature=float(temperature),
+        top_p=float(top_p),
+        streamer=streamer,
+    )

     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()

     assistant_response = ""
     new_history = history_state + [
         {"role": "user", "content": user_message},
         {"role": "assistant", "content": ""}
     ]
     for new_token in streamer:
+        assistant_response += new_token
+        # Optionally, you can call format_math() on assistant_response
         new_history[-1]["content"] = assistant_response.strip()
         yield new_history, new_history

     yield new_history, new_history
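`model.generate` blocks until decoding finishes, which is why the function above runs it on a worker thread and lets the Gradio generator drain the `TextIteratorStreamer` token by token. A self-contained sketch of the same pattern; the tiny checkpoint is chosen only so the snippet runs on CPU:

```python
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
lm = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")

inputs = tok("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs on a worker thread while the main
# thread consumes decoded text fragments as they become available.
thread = Thread(
    target=lm.generate,
    kwargs=dict(**inputs, max_new_tokens=20, streamer=streamer),
)
thread.start()
for piece in streamer:
    print(piece, end="", flush=True)
thread.join()
```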
 
+# ------------ EXAMPLE MESSAGES --------------
 example_messages = {
+    "IIT-JEE 2024 Mathematics": "A student appears for a quiz consisting of only true-false type questions and answers all the questions. The student knows the answers of some questions and guesses the answers for the remaining questions. Whenever the student knows the answer of a question, he gives the correct answer. Assume that probability of the student giving the correct answer for a question, given that he has guessed it, is $\\frac{1}{2}$. Also assume that the probability of the answer for a question being guessed, given that the student's answer is correct, is $\\frac{1}{6}$. Then the probability that the student knows the answer of a randomly chosen question is?",
+    "IIT-JEE 2025 Physics": "A person sitting inside an elevator performs a weighing experiment with an object of mass 50 kg. Suppose that the variation of the height y (in m) of the elevator, from the ground, with time t (in s) is given by y = 8[1 + sin(2πt/T)], where T = 40π s. Taking acceleration due to gravity, g = 10 m/s^2, the maximum variation of the object's weight (in N) as observed in the experiment is ?",
+    "Goldman Sachs Interview Puzzle": "Four friends need to cross a dangerous bridge at night. Unfortunately, they have only one torch and the bridge is too dangerous to cross without one. The bridge is only strong enough to support two people at a time. Not all people take the same time to cross the bridge. Times for each person: 1 min, 2 mins, 7 mins and 10 mins. What is the shortest time needed for all four of them to cross the bridge?",
+    "IIT-JEE 2025 Mathematics": "Let S be the set of all seven-digit numbers that can be formed using the digits 0, 1 and 2. For example, 2210222 is in S, but 0210222 is NOT in S. Then the number of elements x in S such that at least one of the digits 0 and 1 appears exactly twice in x, is ?"
 }
+# --------------- UI (GRADIO) ----------------
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    conversations_state = gr.State({})
+    current_convo_id = gr.State(generate_conversation_id())
+    history_state = gr.State([])
+
+    gr.HTML(
         """
+        <div style="display: flex; align-items: center; gap: 16px; margin-bottom: 1em;">
+            <div style="background-color: black; padding: 6px; border-radius: 8px;">
+                <img src="https://framerusercontent.com/images/j0KjQQyrUfkFw4NwSaxQOLAoBU.png" style="height: 48px;">
+            </div>
+            <h1 style="margin: 0;">Fathom R1 14B Chatbot</h1>
+        </div>
         """
     )
+
+    with gr.Sidebar():
+        gr.Markdown("## Conversations")
+        conversation_selector = gr.Radio(choices=[], label="Select Conversation", interactive=True)
+        new_convo_button = gr.Button("New Conversation ➕")

     with gr.Row():
         with gr.Column(scale=1):
+            gr.Markdown(
+                """
+                Welcome to the Fathom R1 14B Chatbot, developed by Fractal AI Research!
+
+                Our model excels at reasoning tasks in mathematics and science. Given that our model has been optimised for tasks requiring critical thinking, it might overthink for simple chat queries.
+
+                To check out our GitHub repository, click [here](https://github.com/FractalAIResearchLabs/Fathom-R1)
+
+                For training recipe details on how this model was built, please check [here](https://huggingface.co/FractalAIResearch/Fathom-R1-14B)
+
+                Try the example problems below from various popular entrance examinations and interviews or type in your own problems to see how our model breaks down and solves complex reasoning problems.
+
+                NOTE: Once you close this demo window, all currently saved conversations will be lost.
+                """
+            )
             gr.Markdown("### Settings")
+            max_tokens_slider = gr.Slider(minimum=6144, maximum=32768, step=1024, value=16384, label="Max Tokens")
+            with gr.Accordion("Advanced Settings", open=True):
+                temperature_slider = gr.Slider(minimum=0.1, maximum=2.0, value=0.6, label="Temperature")
+                top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, label="Top-p")
+            gr.Markdown(
+                """
+                We sincerely acknowledge [VIDraft](https://huggingface.co/VIDraft) for their Phi 4 Reasoning Plus [space](https://huggingface.co/spaces/VIDraft/phi-4-reasoning-plus), which served as the starting point for this demo.
+                """
             )
         with gr.Column(scale=4):
+            chatbot = gr.Chatbot(label="Chat", type="messages", height=520)
             with gr.Row():
+                user_input = gr.Textbox(label="User Input", placeholder="Type your question here...", lines=3, scale=8)
+                with gr.Column():
+                    submit_button = gr.Button("Send", variant="primary", scale
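The diff is truncated here, before the event handlers that wire up the sidebar. For orientation only, a hedged sketch of how the new-conversation button might be connected, reusing the component names visible above; `start_new_conversation` and its exact signature are hypothetical, not taken from the commit:

```python
# Hypothetical wiring (inside the `with gr.Blocks(...)` context), NOT from the
# commit: archive the current chat under its id, then start a fresh one.
def start_new_conversation(conversations, convo_id, history):
    if history:
        conversations[convo_id] = history
    new_id = generate_conversation_id()
    return (
        conversations,                # conversations_state
        new_id,                       # current_convo_id
        [],                           # history_state
        gr.update(choices=list(conversations.keys()), value=None),  # selector
        [],                           # chatbot
    )

new_convo_button.click(
    fn=start_new_conversation,
    inputs=[conversations_state, current_convo_id, history_state],
    outputs=[conversations_state, current_convo_id, history_state,
             conversation_selector, chatbot],
)
```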