FractalAIR committed
Commit 681a9ea (verified)
1 Parent(s): 71830ea

Update app.py

Files changed (1)
  1. app.py +113 -72
app.py CHANGED
@@ -6,64 +6,55 @@ from threading import Thread
 import re
 import uuid
 
-# ------------- MODEL LOADING -------------
-model_name = "FractalAIResearch/Fathom-R1-14B"  # CHANGE this if your org name or model is different
-
-device = "cuda:0" if torch.cuda.is_available() else "cpu"
-
-model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype="auto")
+# ------------ MODEL SETUP ------------
+model_name = "FractalAIResearch/Fathom-R1-14B"  # adjust if using a local path
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+model = AutoModelForCausalLM.from_pretrained(
+    model_name, torch_dtype=torch.float16, device_map="auto"
+)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-# -------- MATH FORMATTING UTILITIES --------
+# ------------ MATH FORMATTING ------------
 def format_math(text):
     text = re.sub(r"\[(.*?)\]", r"$$\1$$", text, flags=re.DOTALL)
     text = text.replace(r"\(", "$").replace(r"\)", "$")
     return text
 
+# ------------ SESSION & HISTORY MGMT ------------
 def generate_conversation_id():
     return str(uuid.uuid4())[:8]
 
-# --------- PROMPT CONSTRUCTION ---------
-def construct_prompt(history_state, user_message, system_message):
-    # Adjust custom tags as per your Fathom R1 model prompt format. This is a typical chat prompt pattern.
-    prompt = f"[SYSTEM]\n{system_message.strip()}\n"
-    for m in history_state:
-        if m["role"] == "user":
-            prompt += f"[USER]\n{m['content']}\n"
-        elif m["role"] == "assistant":
-            prompt += f"[ASSISTANT]\n{m['content']}\n"
-    prompt += f"[USER]\n{user_message.strip()}\n[ASSISTANT]\n"
-    return prompt
-
-# --------- SYSTEM PROMPT SETUP ---------
-system_message = (
-    "Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. "
-    "Please structure your response into two main sections: Thought and Solution using the specified format: <think> ... </think> ... {Solution section}. "
-    "In the Thought section, detail your reasoning in steps. Then, systematically present the final solution that is logical and concise."
-)
-
-# ----------- GENERATION FUNCTION -----------
+# ------------ GENERATION FUNCTION ------------
 @spaces.GPU(duration=60)
-def generate_response(
-    user_message, max_tokens, temperature, top_p, history_state
-):
+def generate_response(user_message, max_tokens, temperature, top_p, history_state):
     if not user_message.strip():
         return history_state, history_state
 
-    prompt = construct_prompt(history_state, user_message, system_message)
-    inputs = tokenizer(prompt, return_tensors="pt").to(device)
-    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
-
-    generation_kwargs = dict(
-        input_ids=inputs["input_ids"],
-        attention_mask=inputs["attention_mask"],
-        max_new_tokens=int(max_tokens),
-        do_sample=True,
-        temperature=float(temperature),
-        top_p=float(top_p),
-        streamer=streamer,
-    )
+    system_prompt = (
+        "You are an advanced math and science assistant developed by Fractal AI Research. "
+        "You solve problems step-by-step with detailed reasoning. Think deeply and clearly."
+    )
+
+    prompt = f"System: {system_prompt}\n"
+    for m in history_state:
+        role = "User" if m["role"] == "user" else "Assistant"
+        prompt += f"{role}: {m['content'].strip()}\n"
+    prompt += f"User: {user_message.strip()}\nAssistant:"
+
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
+
+    generation_kwargs = {
+        "input_ids": inputs["input_ids"],
+        "attention_mask": inputs["attention_mask"],
+        "max_new_tokens": int(max_tokens),
+        "temperature": temperature,
+        "top_p": top_p,
+        "do_sample": True,
+        "streamer": streamer,
+    }
+
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
 
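Note: both versions of this hunk use the standard `transformers` streaming idiom, in which `model.generate` blocks until decoding finishes, so it runs on a worker thread while the caller drains a `TextIteratorStreamer`. A minimal self-contained sketch of that pattern (the tiny checkpoint below is an illustrative placeholder for quick testing, not the model this Space loads):

```python
# Minimal sketch of the Thread + TextIteratorStreamer pattern used above.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")  # placeholder model
lm = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")

inputs = tok("Hello", return_tensors="pt")
# skip_prompt=True keeps the echoed prompt out of the streamed text.
streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)

# generate() blocks until decoding finishes, so it runs on a worker thread
# while the main thread consumes tokens from the streamer as they appear.
thread = Thread(target=lm.generate, kwargs=dict(**inputs, max_new_tokens=20, streamer=streamer))
thread.start()

text = ""
for piece in streamer:  # iteration ends when generation is done
    text += piece
thread.join()
print(text)
```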
@@ -72,23 +63,23 @@ def generate_response(
         {"role": "user", "content": user_message},
         {"role": "assistant", "content": ""}
     ]
-    for new_token in streamer:
-        assistant_response += new_token
-        # Optionally, you can call format_math() on assistant_response
+
+    for token in streamer:
+        assistant_response += token
         new_history[-1]["content"] = assistant_response.strip()
         yield new_history, new_history
 
     yield new_history, new_history
 
-# ------------ EXAMPLE MESSAGES --------------
+# ------------ EXAMPLES ------------
 example_messages = {
-    "IIT-JEE 2024 Mathematics": "A student appears for a quiz consisting of only true-false type questions and answers all the questions. The student knows the answers of some questions and guesses the answers for the remaining questions. Whenever the student knows the answer of a question, he gives the correct answer. Assume that probability of the student giving the correct answer for a question, given that he has guessed it, is $\\frac{1}{2}$. Also assume that the probability of the answer for a question being guessed, given that the student's answer is correct, is $\\frac{1}{6}$. Then the probability that the student knows the answer of a randomly chosen question is?",
-    "IIT-JEE 2025 Physics": "A person sitting inside an elevator performs a weighing experiment with an object of mass 50 kg. Suppose that the variation of the height 𝑦 (in m) of the elevator, from the ground, with time 𝑡 (in s) is given by 𝑦 = 8 [1 + sin ( 2𝜋𝑡/𝑇 )], where 𝑇 = 40𝜋 s. Taking acceleration due to gravity, 𝑔 = 10 m/s^2, the maximum variation of the object’s weight (in N) as observed in the experiment is ?",
-    "Goldman Sachs Interview Puzzle": "Four friends need to cross a dangerous bridge at night. Unfortunately, they have only one torch and the bridge is too dangerous to cross without one. The bridge is only strong enough to support two people at a time. Not all people take the same time to cross the bridge. Times for each person: 1 min, 2 mins, 7 mins and 10 mins. What is the shortest time needed for all four of them to cross the bridge?",
-    "IIT-JEE 2025 Mathematics": "Let 𝑆 be the set of all seven-digit numbers that can be formed using the digits 0, 1 and 2. For example, 2210222 is in 𝑆, but 0210222 is NOT in 𝑆. Then the number of elements 𝑥 in 𝑆 such that at least one of the digits 0 and 1 appears exactly twice in 𝑥, is ?"
+    "IIT-JEE 2024 Mathematics": "A student appears for a quiz consisting of only true-false type questions...",
+    "IIT-JEE 2025 Physics": "A person sitting inside an elevator performs a weighing experiment...",
+    "Goldman Sachs Interview Puzzle": "Four friends need to cross a dangerous bridge at night...",
+    "IIT-JEE 2025 Mathematics": "Let 𝑆 be the set of all seven-digit numbers that can be formed using the digits 0, 1 and 2..."
 }
 
-# --------------- UI (GRADIO) ----------------
+# ------------ UI LAYOUT ------------
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     conversations_state = gr.State({})
     current_convo_id = gr.State(generate_conversation_id())
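The `yield new_history, new_history` lines work because Gradio treats generator callbacks as streaming handlers: every yield re-renders the bound outputs. A standalone toy sketch of that mechanism (`echo_stream` is a stand-in for `generate_response`, not code from this commit):

```python
# Toy sketch: a generator callback streams partial chat history into a Chatbot.
import time

import gradio as gr

def echo_stream(message, history):
    history = history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": ""},
    ]
    for ch in message:  # "generate" one character at a time
        history[-1]["content"] += ch
        time.sleep(0.05)
        yield history, history  # each yield updates the Chatbot and the State

with gr.Blocks() as toy:
    chat = gr.Chatbot(type="messages")
    state = gr.State([])
    box = gr.Textbox()
    box.submit(echo_stream, inputs=[box, state], outputs=[chat, state])

if __name__ == "__main__":
    toy.launch()
```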
@@ -98,7 +89,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     """
     <div style="display: flex; align-items: center; gap: 16px; margin-bottom: 1em;">
         <div style="background-color: black; padding: 6px; border-radius: 8px;">
-            <img src="https://framerusercontent.com/images/j0KjQQyrUfkFw4NwSaxQOLAoBU.png" style="height: 48px;">
+            <img src="https://framerusercontent.com/images/j0KjQQyrUfkFw4NwSaxQOLAoBU.png" alt="Fractal AI Logo" style="height: 48px;">
         </div>
         <h1 style="margin: 0;">Fathom R1 14B Chatbot</h1>
     </div>
@@ -112,34 +103,84 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
     with gr.Row():
         with gr.Column(scale=1):
-            gr.Markdown(
-                """
-                Welcome to the Fathom R1 14B Chatbot, developed by Fractal AI Research!
-
-                Our model excels at reasoning tasks in mathematics and science. Given that our model has been optimised for tasks requiring critical thinking, it might overthink for simple chat queries.
-
-                To check out our GitHub repository, click [here](https://github.com/FractalAIResearchLabs/Fathom-R1)
-
-                For training recipe details on how this model was built, please check [here](https://huggingface.co/FractalAIResearch/Fathom-R1-14B)
-
-                Try the example problems below from various popular entrance examinations and interviews or type in your own problems to see how our model breaks down and solves complex reasoning problems.
-
-                NOTE: Once you close this demo window, all currently saved conversations will be lost.
-                """
-            )
+            gr.Markdown("""
+            Welcome to the Fathom R1 14B Chatbot, developed by Fractal AI Research!
+
+            Our model excels at reasoning tasks in mathematics and science. Given that our model has been optimised for tasks requiring critical thinking, it might overthink for simple chat queries.
+
+            To check out our GitHub repository, click [here](https://github.com/FractalAIResearchLabs/Fathom-R1)
+
+            For training recipe details on how this model was built, please check [here](https://huggingface.co/FractalAIResearch/Fathom-R1-14B)
+
+            Try the example problems below from various popular entrance examinations and interviews or type in your own problems to see how our model breaks down and solves complex reasoning problems.
+            """)
+
             gr.Markdown("### Settings")
             max_tokens_slider = gr.Slider(minimum=6144, maximum=32768, step=1024, value=16384, label="Max Tokens")
             with gr.Accordion("Advanced Settings", open=True):
                 temperature_slider = gr.Slider(minimum=0.1, maximum=2.0, value=0.6, label="Temperature")
                 top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, label="Top-p")
-            gr.Markdown(
-                """
-                We sincerely acknowledge [VIDraft](https://huggingface.co/VIDraft) for their Phi 4 Reasoning Plus [space](https://huggingface.co/spaces/VIDraft/phi-4-reasoning-plus), which served as the starting point for this demo.
-                """
-            )
+
+            gr.Markdown("We sincerely acknowledge [VIDraft](https://huggingface.co/VIDraft) for their Phi 4 Reasoning Plus [space](https://huggingface.co/spaces/VIDraft/phi-4-reasoning-plus), which served as the starting point for this demo.")
+
         with gr.Column(scale=4):
             chatbot = gr.Chatbot(label="Chat", type="messages", height=520)
             with gr.Row():
                 user_input = gr.Textbox(label="User Input", placeholder="Type your question here...", lines=3, scale=8)
                 with gr.Column():
-                    submit_button = gr.Button("Send", variant="primary", scale=1)
+                    submit_button = gr.Button("Send", variant="primary", scale=1)
+                    clear_button = gr.Button("Clear", scale=1)
+    gr.Markdown("**Try these examples:**")
+    with gr.Row():
+        example1_button = gr.Button("IIT-JEE 2025 Mathematics")
+        example2_button = gr.Button("IIT-JEE 2025 Physics")
+        example3_button = gr.Button("Goldman Sachs Interview Puzzle")
+        example4_button = gr.Button("IIT-JEE 2024 Mathematics")
+
+    def update_conversation_list(conversations):
+        return [conversations[cid]["title"] for cid in conversations]
+
+    def start_new_conversation(conversations):
+        new_id = generate_conversation_id()
+        conversations[new_id] = {"title": f"New Conversation {new_id}", "messages": []}
+        return new_id, [], gr.update(choices=update_conversation_list(conversations), value=conversations[new_id]["title"]), conversations
+
+    def load_conversation(selected_title, conversations):
+        for cid, convo in conversations.items():
+            if convo["title"] == selected_title:
+                return cid, convo["messages"], convo["messages"]
+        return current_convo_id.value, history_state.value, history_state.value
+
+    def send_message(user_message, max_tokens, temperature, top_p, convo_id, history, conversations):
+        if convo_id not in conversations:
+            title = " ".join(user_message.strip().split()[:5])
+            conversations[convo_id] = {"title": title, "messages": history}
+        if conversations[convo_id]["title"].startswith("New Conversation"):
+            conversations[convo_id]["title"] = " ".join(user_message.strip().split()[:5])
+
+        for updated_history, new_history in generate_response(user_message, max_tokens, temperature, top_p, history):
+            conversations[convo_id]["messages"] = new_history
+            yield updated_history, new_history, gr.update(choices=update_conversation_list(conversations), value=conversations[convo_id]["title"]), conversations
+
+    submit_button.click(
+        fn=send_message,
+        inputs=[user_input, max_tokens_slider, temperature_slider, top_p_slider, current_convo_id, history_state, conversations_state],
+        outputs=[chatbot, history_state, conversation_selector, conversations_state],
+        concurrency_limit=16
+    ).then(
+        fn=lambda: gr.update(value=""),
+        inputs=None,
+        outputs=user_input
+    )
+
+    clear_button.click(fn=lambda: ([], []), inputs=None, outputs=[chatbot, history_state])
+    new_convo_button.click(fn=start_new_conversation, inputs=[conversations_state], outputs=[current_convo_id, history_state, conversation_selector, conversations_state])
+    conversation_selector.change(fn=load_conversation, inputs=[conversation_selector, conversations_state], outputs=[current_convo_id, history_state, chatbot])
+    example1_button.click(fn=lambda: gr.update(value=example_messages["IIT-JEE 2025 Mathematics"]), inputs=None, outputs=user_input)
+    example2_button.click(fn=lambda: gr.update(value=example_messages["IIT-JEE 2025 Physics"]), inputs=None, outputs=user_input)
+    example3_button.click(fn=lambda: gr.update(value=example_messages["Goldman Sachs Interview Puzzle"]), inputs=None, outputs=user_input)
+    example4_button.click(fn=lambda: gr.update(value=example_messages["IIT-JEE 2024 Mathematics"]), inputs=None, outputs=user_input)
+
+# ----------- LAUNCH APP -----------
+if __name__ == "__main__":
+    demo.queue().launch(share=True, ssr_mode=False)
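One possible follow-up to the prompt change in the first hunk (not part of this commit): rather than hand-rolling a `System:/User:/Assistant:` string, the history could be formatted with `tokenizer.apply_chat_template`, which uses the special tokens the checkpoint was trained with. A sketch, where `build_inputs` is a hypothetical helper and we assume the tokenizer ships a chat template (R1-style checkpoints normally do):

```python
# Sketch only: build model inputs from the chat history via the tokenizer's
# own chat template instead of a hand-rolled "System:/User:/Assistant:" string.
# build_inputs is a hypothetical helper, not part of this commit.
def build_inputs(tokenizer, history_state, user_message, system_prompt):
    messages = [{"role": "system", "content": system_prompt}]
    messages += [{"role": m["role"], "content": m["content"]} for m in history_state]
    messages.append({"role": "user", "content": user_message.strip()})
    # add_generation_prompt=True appends the assistant-turn header so the
    # model starts a fresh reply instead of continuing the user's turn.
    return tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
```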