import gradio as gr
import spaces
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
MODEL_ID = "internlm/Spatial-SSRL-3B"
MAX_NEW_TOKENS = 2048
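# 2048 new tokens leaves headroom for a long <think> reasoning trace before
# the final \boxed{} answer; a smaller budget risks truncating the monologue.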
# Example questions bundled with the demo (paired with ./examples/eg*.jpg)
EXAMPLE_QUESTIONS = [
"Consider the real-world 3D location of the objects. Which object is further away from the camera? A. boat B. fire hydrant\n You FIRST think about the reasoning process as an internal monologue and then provide the final answer. The reasoning process MUST BE enclosed within tags. The final answer MUST BE put in \\boxed{}.",
"Consider the real-world 3D orientations of the objects. Are the kid and the teddy bear facing same or similar directions, or very different directions? A. very different directions B. same or similar directions\n You FIRST think about the reasoning process as an internal monologue and then provide the final answer. The reasoning process MUST BE enclosed within tags. The final answer MUST BE put in \\boxed{}.",
"Consider the real-world 3D locations and orientations of the objects. If I stand at the recreational vehicle's position facing where it is facing, is the dog in front of me or behind me? A. behind B. in front of\n You FIRST think about the reasoning process as an internal monologue and then provide the final answer. The reasoning process MUST BE enclosed within tags. The final answer MUST BE put in \\boxed{}."
]
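# With the prompt suffix above, the model is expected to reply in the form:
#   <think> ...step-by-step spatial reasoning... </think>
#   \boxed{A}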
def get_device() -> str:
return "cuda" if torch.cuda.is_available() else "cpu"
def select_dtype(device: str):
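    # bfloat16 avoids the overflow issues float16 can hit during long
    # generations, at the same memory cost; float32 is the safe CPU fallback.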
if device == "cuda":
if torch.cuda.is_bf16_supported():
return torch.bfloat16
return torch.float16
return torch.float32
def load_model():
device = get_device()
dtype = select_dtype(device)
    # Use device_map="auto" for proper GPU allocation with the spaces.GPU decorator
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
MODEL_ID,
torch_dtype=dtype,
device_map="auto",
trust_remote_code=True,
)
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
return model, processor
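# Load once at import time so every request reuses the same weights. With
# device_map="auto", the weights land on the GPU when one is visible and on
# CPU otherwise, so the demo still runs (slowly) without CUDA.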
MODEL, PROCESSOR = load_model()
@spaces.GPU
@torch.inference_mode()
def answer_question(image: Image.Image, question: str):
if image is None:
return "Please upload an image.", 0
if not question or question.strip() == "":
return "Please enter a question.", 0
try:
# Validate image
if not isinstance(image, Image.Image):
return "Error: Invalid image format", 0
# Check image size (warn if too large)
max_size = 4096
if image.width > max_size or image.height > max_size:
# Resize if too large to prevent OOM
image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
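            # Note: thumbnail() mutates the image in place and preserves the
            # aspect ratio, capping the longer side at max_size, so no
            # re-assignment is needed above.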
device = MODEL.device
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": question},
],
}
]
prompt_text = PROCESSOR.apply_chat_template(
messages, tokenize=False, add_generation_prompt=True
)
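        # prompt_text now embeds the model's vision placeholder tokens
        # (for Qwen2.5-VL, roughly <|vision_start|><|image_pad|><|vision_end|>),
        # which the processor call below expands to match the image patches.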
inputs = PROCESSOR(
text=[prompt_text],
images=[image],
return_tensors="pt",
).to(device)
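        # Greedy decoding (do_sample=False) keeps answers deterministic,
        # which suits multiple-choice spatial questions.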
generated_ids = MODEL.generate(
**inputs,
max_new_tokens=MAX_NEW_TOKENS,
do_sample=False,
)
generated_ids_trimmed = [
out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = PROCESSOR.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
answer = output_text[0].strip()
        # Report how many tokens the model generated beyond the prompt.
        input_length = inputs.input_ids.shape[-1]
        num_generated_tokens = max(generated_ids.shape[-1] - input_length, 0)
return answer, int(num_generated_tokens)
except torch.cuda.OutOfMemoryError:
torch.cuda.empty_cache()
return "Error: Out of GPU memory. Please try with a smaller image.", 0
except Exception as e:
return f"Error generating answer: {str(e)}", 0
def load_example(example_idx):
"""Load example image and question based on index"""
example_images = [
"./examples/eg1.jpg",
"./examples/eg2.jpg",
"./examples/eg3.jpg"
]
if 0 <= example_idx < len(EXAMPLE_QUESTIONS):
return Image.open(example_images[example_idx]), EXAMPLE_QUESTIONS[example_idx]
return None, ""
with gr.Blocks(title="Spatial-SSRL Spatial Reasoning") as demo:
gr.Markdown("# 🌍 Spatial-SSRL: Spatial Reasoning with Vision-Language Models")
gr.Markdown("### Understanding 3D Spatial Relationships from 2D Images")
gr.Markdown("✨ Upload an image and ask questions about spatial relationships, locations, and orientations! ✨")
with gr.Row():
with gr.Column():
image_input = gr.Image(type="pil", label="Input Image")
question_input = gr.Textbox(
label="Question",
placeholder="Ask a question about spatial relationships in the image...",
lines=4
)
submit_button = gr.Button("Submit", variant="primary")
with gr.Column():
answer_output = gr.Textbox(label="Answer", lines=10)
token_output = gr.Number(label="Generated Tokens", precision=0)
submit_button.click(
fn=answer_question,
inputs=[image_input, question_input],
outputs=[answer_output, token_output],
        show_progress="full",
)
gr.Markdown("### 📸 Example Questions")
gr.Markdown("Click on an example below to load it:")
with gr.Row():
example1_btn = gr.Button("Example 1: Boat vs Fire Hydrant")
example2_btn = gr.Button("Example 2: Kid and Teddy Bear")
example3_btn = gr.Button("Example 3: RV and Dog")
example1_btn.click(
fn=lambda: load_example(0),
inputs=[],
outputs=[image_input, question_input],
)
example2_btn.click(
fn=lambda: load_example(1),
inputs=[],
outputs=[image_input, question_input],
)
example3_btn.click(
fn=lambda: load_example(2),
inputs=[],
outputs=[image_input, question_input],
)
gr.Examples(
examples=[
["./examples/eg1.jpg", EXAMPLE_QUESTIONS[0]],
["./examples/eg2.jpg", EXAMPLE_QUESTIONS[1]],
["./examples/eg3.jpg", EXAMPLE_QUESTIONS[2]],
],
inputs=[image_input, question_input],
outputs=[answer_output, token_output],
fn=answer_question,
cache_examples=True,
label="Complete Examples"
)
gr.Markdown("### About")
gr.Markdown(
"""
    This demo showcases the spatial reasoning capabilities of vision-language models. The model can:
- Understand 3D spatial relationships from 2D images
- Reason about object locations (near/far, front/behind)
- Analyze object orientations and facing directions
- Provide step-by-step reasoning before answering
    The model is trained to answer in a structured format, with its reasoning enclosed in `<think> </think>` tags and the final answer in `\\boxed{}`.
"""
)
demo.launch()