# IWM_TEST_SPACE / app.py
import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer
# --- Model Loading ---
model_name = "gitglubber/Qwen3-IWM"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",   # use the dtype recorded in the checkpoint config
    device_map="auto",    # let accelerate place the weights on the available device
)
# --- System Message ---
# Define the persona or instructions for the model
system_message = """"You are an expert at the terminal. If asked to perform a task - decorate the command with @command. Explaining why you would perform that task to complete the function. Then if there are follow up commands use @command2, etc. Be helpful and willing to correction."""
# --- Generation Function ---
@spaces.GPU(duration=120)
def generate_response(chat_history):
    # Build the model input from the chat history; the system message comes first
    messages = [{"role": "system", "content": system_message}]
    # Add previous user/assistant turns, skipping the pending reply slot (None)
    for user_msg, assistant_msg in chat_history:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg is not None:
            messages.append({"role": "assistant", "content": assistant_msg})
    # Apply the chat template and append the generation prompt for the assistant
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    # Generate the reply
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=8192,
    )
    # Strip the prompt tokens so only newly generated text is decoded
    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
    content = tokenizer.decode(output_ids, skip_special_tokens=True)
    return content
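# --- Optional: streaming variant (a sketch, not wired into the UI below) ---
# This uses transformers' TextIteratorStreamer to yield the reply as it is
# generated, which keeps long responses feeling responsive. Whether @spaces.GPU
# plays well with generator functions depends on the ZeroGPU runtime, so treat
# the decorator here as an assumption.
@spaces.GPU(duration=120)
def generate_response_streaming(chat_history):
    from threading import Thread
    from transformers import TextIteratorStreamer

    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in chat_history:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg is not None:
            messages.append({"role": "assistant", "content": assistant_msg})
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    # skip_prompt drops the echoed input; decode kwargs strip special tokens
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # Run generation in a background thread while we consume the streamer
    thread = Thread(
        target=model.generate,
        kwargs=dict(**model_inputs, streamer=streamer, max_new_tokens=8192),
    )
    thread.start()
    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial  # the reply accumulated so far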
# --- Gradio Interface ---
with gr.Blocks(fill_height=True) as demo:
    gr.Markdown("# IWM Chat Bot")
    # Chat history lives in the Chatbot component as (user, assistant) tuples
    chatbot = gr.Chatbot(scale=1)
    msg = gr.Textbox(label="Input", scale=0)
    clear = gr.Button("Clear")
    def respond(message, chat_history):
        # Ignore empty or whitespace-only messages
        if not message.strip():
            return "", chat_history
        # Append the new user message with a placeholder for the reply
        chat_history.append((message, None))
        # Generate a reply from the full history (the None placeholder is
        # skipped inside generate_response)
        bot_response = generate_response(chat_history)
        # Fill in the placeholder with the bot's response
        chat_history[-1] = (message, bot_response)
        return "", chat_history
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)
# Launch the app
demo.launch()
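# To call this Space from Python, something like the gradio_client snippet
# below should work. The Space id and the "/respond" endpoint name are
# assumptions (gradio names endpoints after the bound function unless
# overridden):
#
#   from gradio_client import Client
#   client = Client("gitglubber/IWM_TEST_SPACE")
#   cleared_box, history = client.predict("list the files here", [], api_name="/respond")
#   print(history[-1][1])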