Update app.py
app.py CHANGED
@@ -3,192 +3,183 @@ import spaces
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 import torch
 from threading import Thread
-import
-import

-#
-
-
-
-our_model = AutoModelForCausalLM.from_pretrained(our_model_path, device_map="auto", torch_dtype="auto")
-our_tokenizer = AutoTokenizer.from_pretrained(our_model_path)

-
-
-
-    return text

-#
-

-
-

 @spaces.GPU(duration=60)
-def generate_response(user_message, max_tokens, temperature, top_p, history_state):
     if not user_message.strip():
         return history_state, history_state

-    model = our_model
-    tokenizer = our_tokenizer
-    start_tag = "<|im_start|>"
-    sep_tag = "<|im_sep|>"
-    end_tag = "<|im_end|>"
-
-    system_message = "Your role as an assistant..."
-    prompt = f"{start_tag}system{sep_tag}{system_message}{end_tag}"
-    for message in history_state:
-        if message["role"] == "user":
-            prompt += f"{start_tag}user{sep_tag}{message['content']}{end_tag}"
-        elif message["role"] == "assistant" and message["content"]:
-            prompt += f"{start_tag}assistant{sep_tag}{message['content']}{end_tag}"
-    prompt += f"{start_tag}user{sep_tag}{user_message}{end_tag}{start_tag}assistant{sep_tag}"
-
-    inputs = tokenizer(prompt, return_tensors="pt").to(device)
-    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
-
-    generation_kwargs = {
-        "input_ids": inputs["input_ids"],
-        "attention_mask": inputs["attention_mask"],
-        "max_new_tokens": int(max_tokens),
-        "do_sample": True,
-        "temperature": temperature,
-        "top_k": 50,
-        "top_p": top_p,
-        "repetition_penalty": 1.0,
-        "pad_token_id": tokenizer.eos_token_id,
-        "streamer": streamer,
-    }
-
     try:
         thread = Thread(target=model.generate, kwargs=generation_kwargs)
         thread.start()
-    except Exception:
-        yield history_state + [{"role": "user", "content": user_message}, {"role": "assistant", "content": "⚠️ Generation failed."}], history_state
-        return
-
-    assistant_response = ""
-    new_history = history_state + [
-        {"role": "user", "content": user_message},
-        {"role": "assistant", "content": ""}
-    ]

-
         for new_token in streamer:
-
-                continue
-            cleaned_token = new_token.replace("<|im_start|>", "").replace("<|im_sep|>", "").replace("<|im_end|>", "")
-            assistant_response += cleaned_token
             new_history[-1]["content"] = assistant_response.strip()
             yield new_history, new_history
-    except Exception:
-        pass

-


 example_messages = {
-    "
-    "
-    "
-    "JEE Main 2025 Laws of Motion": "A massless spring gets elongated by amount x_1 under a tension of 5 N. Its elongation is x_2 under the tension of 7 N. For the elongation of 5x_1 - 2x_2, the tension in the spring will be?"
 }

 with gr.Blocks(theme=gr.themes.Soft()) as demo:
-
-
-
-
-
-
-
-
-
-
-
-    )
-
-    with gr.Sidebar():
-        gr.Markdown("## Conversations")
-        conversation_selector = gr.Radio(choices=[], label="Select Conversation", interactive=True)
-        new_convo_button = gr.Button("New Conversation ➕")
-
-    current_convo_id = gr.State(generate_conversation_id())
     history_state = gr.State([])

     with gr.Row():
         with gr.Column(scale=1):
-            # INTRO TEXT MOVED HERE
-            gr.Markdown(
-                """
-                Welcome to the Ramanujan Ganit R1 14B V1 Chatbot, developed by Fractal AI Research!
-
-                Our model excels at reasoning tasks in mathematics and science.
-
-                Try the example problems below from JEE Main 2025 or type in your own problems to see how our model breaks down complex reasoning problems.
-
-                Please note that once you close this demo window, all currently saved conversations will be lost.
-                """
-            )
-
             gr.Markdown("### Settings")
-            max_tokens_slider = gr.Slider(
-
-
-
-
-
-
-
-
-

         with gr.Column(scale=4):
-
-            chatbot = gr.Chatbot(label="Chat", type="messages", height=520)
             with gr.Row():
-                user_input = gr.Textbox(
-
-
-
             gr.Markdown("**Try these examples:**")
             with gr.Row():
-                example1_button = gr.Button("
-                example2_button = gr.Button("
-                example3_button = gr.Button("
-                example4_button = gr.Button("JEE Main 2025\nLaws of Motion")
-
-    def update_conversation_list():
-        return [conversations[cid]["title"] for cid in conversations]
-
-    def start_new_conversation():
-        new_id = generate_conversation_id()
-        conversations[new_id] = {"title": f"New Conversation {new_id}", "messages": []}
-        return new_id, [], gr.update(choices=update_conversation_list(), value=conversations[new_id]["title"])
-
-    def load_conversation(selected_title):
-        for cid, convo in conversations.items():
-            if convo["title"] == selected_title:
-                return cid, convo["messages"], convo["messages"]
-        return current_convo_id.value, history_state.value, history_state.value
-
-    def send_message(user_message, max_tokens, temperature, top_p, convo_id, history):
-        if convo_id not in conversations:
-            #title = user_message.strip().split("\n")[0][:40]
-            title = " ".join(user_message.strip().split()[:5])
-            conversations[convo_id] = {"title": title, "messages": history}
-        if conversations[convo_id]["title"].startswith("New Conversation"):
-            #conversations[convo_id]["title"] = user_message.strip().split("\n")[0][:40]
-            conversations[convo_id]["title"] = " ".join(user_message.strip().split()[:5])
-        for updated_history, new_history in generate_response(user_message, max_tokens, temperature, top_p, history):
-            conversations[convo_id]["messages"] = new_history
-            yield updated_history, new_history, gr.update(choices=update_conversation_list(), value=conversations[convo_id]["title"])

     submit_button.click(
-        fn=
-        inputs=[user_input, max_tokens_slider, temperature_slider, top_p_slider,
-        outputs=[chatbot, history_state
     ).then(
         fn=lambda: gr.update(value=""),
         inputs=None,
@@ -201,22 +192,20 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         outputs=[chatbot, history_state]
     )

-
-        fn=
         inputs=None,
-        outputs=
     )
-
-
-
-
-
     )

-
-    example2_button.click(fn=lambda: gr.update(value=example_messages["JEE Main 2025 Coordinate Geometry"]), inputs=None, outputs=user_input)
-    example3_button.click(fn=lambda: gr.update(value=example_messages["JEE Main 2025 Probability & Statistics"]), inputs=None, outputs=user_input)
-    example4_button.click(fn=lambda: gr.update(value=example_messages["JEE Main 2025 Laws of Motion"]), inputs=None, outputs=user_input)
-
-demo.launch(ssr_mode=False)
-
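For context, the removed generate_response built its prompt by concatenating the chat tags (<|im_start|>, <|im_sep|>, <|im_end|>) by hand. A minimal, runnable sketch of the string that code produced (the system message and user question are placeholder values, not the Space's actual inputs):

start_tag, sep_tag, end_tag = "<|im_start|>", "<|im_sep|>", "<|im_end|>"
system_message = "Your role as an assistant..."  # truncated in the removed code
user_message = "What is 2 + 2?"                  # placeholder question
prompt = f"{start_tag}system{sep_tag}{system_message}{end_tag}"
prompt += f"{start_tag}user{sep_tag}{user_message}{end_tag}{start_tag}assistant{sep_tag}"
print(prompt)
# <|im_start|>system<|im_sep|>Your role as an assistant...<|im_end|><|im_start|>user<|im_sep|>What is 2 + 2?<|im_end|><|im_start|>assistant<|im_sep|>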
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 import torch
 from threading import Thread
+import os
+import logging

+# Set up logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)

+# Model and tokenizer configuration
+MODEL_NAME = "FractalAIResearch/Fathom-R1-14B"
+device = "cuda:0" if torch.cuda.is_available() else "cpu"

+# Load tokenizer and model
+try:
+    logger.info("Loading tokenizer and model...")
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+    model = AutoModelForCausalLM.from_pretrained(
+        MODEL_NAME,
+        torch_dtype=torch.bfloat16,  # Optimize for H200 GPUs
+        device_map="auto",           # Automatically distribute across GPU
+        trust_remote_code=True       # Required for Qwen2-based models
+    )
+    logger.info("Model and tokenizer loaded successfully.")
+except Exception as e:
+    logger.error(f"Error loading model or tokenizer: {str(e)}")
+    raise e

+# Ensure model is on GPU
+#model = model.to(device)

 @spaces.GPU(duration=60)
+def generate_response(user_message, max_tokens, temperature, top_k, top_p, repetition_penalty, history_state):
     if not user_message.strip():
+        logger.info("Empty message received, returning history unchanged.")
         return history_state, history_state

     try:
+        logger.info("Processing new message...")
+        # System prompt for Fathom-R1-14B
+        system_message = "You are a helpful assistant, specialising in math and STEM reasoning."
+
+        # Build messages list using Qwen2 chat template format
+        messages = [{"role": "system", "content": system_message}]
+        for message in history_state:
+            messages.append({"role": message["role"], "content": message["content"]})
+        messages.append({"role": "user", "content": user_message})
+
+        # Apply Qwen2 chat template
+        prompt = tokenizer.apply_chat_template(
+            messages,
+            tokenize=False,
+            add_generation_prompt=True
+        )
+
+        # Tokenize input
+        inputs = tokenizer(prompt, return_tensors="pt").to(device)
+
+        # Configure sampling: fall back to greedy decoding only when all sampling knobs are at neutral values
+        do_sample = not (temperature == 1.0 and top_k >= 100 and top_p == 1.0)
+
+        # Set up streaming
+        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
+
+        # Generation parameters
+        generation_kwargs = {
+            "input_ids": inputs["input_ids"],
+            "attention_mask": inputs["attention_mask"],
+            "max_new_tokens": int(max_tokens),
+            "do_sample": do_sample,
+            "temperature": temperature,
+            "top_k": int(top_k),
+            "top_p": top_p,
+            "repetition_penalty": repetition_penalty,
+            "streamer": streamer,
+            "pad_token_id": tokenizer.eos_token_id
+        }
+
+        # Start generation in a separate thread
         thread = Thread(target=model.generate, kwargs=generation_kwargs)
         thread.start()

+        # Stream the response
+        assistant_response = ""
+        new_history = history_state + [
+            {"role": "user", "content": user_message},
+            {"role": "assistant", "content": ""}
+        ]
         for new_token in streamer:
+            assistant_response += new_token
             new_history[-1]["content"] = assistant_response.strip()
             yield new_history, new_history

+        logger.info("Response generated successfully.")
+        yield new_history, new_history

+    except Exception as e:
+        logger.error(f"Error during inference: {str(e)}")
+        # Surface the error in the chat; a bare `return value` inside a generator would be silently dropped
+        yield history_state + [
+            {"role": "user", "content": user_message},
+            {"role": "assistant", "content": f"Error: {str(e)}"}
+        ], history_state

+# Example prompts
 example_messages = {
+    "Math reasoning": "If a rectangular prism has a length of 6 cm, a width of 4 cm, and a height of 5 cm, what is the length of the longest line segment that can be drawn from one vertex to another?",
+    "Logic puzzle": "Four people (Alex, Blake, Casey, and Dana) each have a different favorite color (red, blue, green, yellow) and a different favorite fruit (apple, banana, cherry, date). Given the following clues: 1) The person who likes red doesn't like dates. 2) Alex likes yellow. 3) The person who likes blue likes cherries. 4) Blake doesn't like apples or bananas. 5) Casey doesn't like yellow or green. Who likes what color and what fruit?",
+    "Physics problem": "A ball is thrown upward with an initial velocity of 15 m/s from a height of 2 meters above the ground. Assuming the acceleration due to gravity is 9.8 m/s², determine: 1) The maximum height the ball reaches. 2) The total time the ball is in the air before hitting the ground. 3) The velocity with which the ball hits the ground."
 }

+# Gradio interface
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown(
+        """
+        # Fathom-R1-14B Chatbot
+        Welcome to the Fathom-R1-14B Chatbot! This model excels at multi-step reasoning tasks in mathematics, logic, and science.
+
+        The model specializes in math and STEM reasoning, providing detailed step-by-step solutions.
+
+        Try the example problems below to see how the model breaks down complex reasoning problems.
+        """
+    )
+
     history_state = gr.State([])

     with gr.Row():
         with gr.Column(scale=1):
             gr.Markdown("### Settings")
+            max_tokens_slider = gr.Slider(
+                minimum=64,
+                maximum=16384,  # Fathom’s context window is 16K
+                step=1024,
+                value=4096,
+                label="Max Tokens"
+            )
+            with gr.Accordion("Advanced Settings", open=False):
+                temperature_slider = gr.Slider(
+                    minimum=0.1,
+                    maximum=2.0,
+                    value=0.8,
+                    label="Temperature"
+                )
+                top_k_slider = gr.Slider(
+                    minimum=1,
+                    maximum=100,
+                    step=1,
+                    value=50,
+                    label="Top-k"
+                )
+                top_p_slider = gr.Slider(
+                    minimum=0.1,
+                    maximum=1.0,
+                    value=0.95,
+                    label="Top-p"
+                )
+                repetition_penalty_slider = gr.Slider(
+                    minimum=1.0,
+                    maximum=2.0,
+                    value=1.0,
+                    label="Repetition Penalty"
+                )

         with gr.Column(scale=4):
+            chatbot = gr.Chatbot(label="Chat", type="messages")
             with gr.Row():
+                user_input = gr.Textbox(
+                    label="Your message",
+                    placeholder="Type your message here...",
+                    scale=3
+                )
+                submit_button = gr.Button("Send", variant="primary", scale=1)
+                clear_button = gr.Button("Clear", scale=1)
             gr.Markdown("**Try these examples:**")
             with gr.Row():
+                example1_button = gr.Button("Math reasoning")
+                example2_button = gr.Button("Logic puzzle")
+                example3_button = gr.Button("Physics problem")

     submit_button.click(
+        fn=generate_response,
+        inputs=[user_input, max_tokens_slider, temperature_slider, top_k_slider, top_p_slider, repetition_penalty_slider, history_state],
+        outputs=[chatbot, history_state]
     ).then(
         fn=lambda: gr.update(value=""),
         inputs=None,
@@ -201,22 +192,20 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         outputs=[chatbot, history_state]
     )

+    example1_button.click(
+        fn=lambda: gr.update(value=example_messages["Math reasoning"]),
         inputs=None,
+        outputs=user_input
     )
+    example2_button.click(
+        fn=lambda: gr.update(value=example_messages["Logic puzzle"]),
+        inputs=None,
+        outputs=user_input
+    )
+    example3_button.click(
+        fn=lambda: gr.update(value=example_messages["Physics problem"]),
+        inputs=None,
+        outputs=user_input
     )

+demo.launch(ssr_mode=False)
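The new version delegates prompt construction to the tokenizer's chat template instead of hand-built tags. A minimal sketch of that step in isolation, runnable without loading the model weights (the messages below are placeholders, and downloading the tokenizer from the Hub is assumed to succeed):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("FractalAIResearch/Fathom-R1-14B")
messages = [
    {"role": "system", "content": "You are a helpful assistant, specialising in math and STEM reasoning."},
    {"role": "user", "content": "What is 2 + 2?"},  # placeholder question
]
# Same call the updated app makes before tokenizing and streaming the generation
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)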