EthanCastro committed on
Commit
401720b
·
verified ·
1 Parent(s): bdb0c56

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -4,11 +4,10 @@ from peft import PeftModel
4
  import torch
5
 
6
  # --- CONFIGURATION ---
7
- BASE_MODEL = "unsloth/Qwen3-VL-2B-Instruct-unsloth-bnb-4bit"
8
- LORA_ID = "EthanCastro/qwen3-vl-2b-quickdraw"
9
 
10
  print("Loading model and processor...")
11
- # Load the base 4-bit model
12
  model = AutoModelForImageTextToText.from_pretrained(
13
  BASE_MODEL,
14
  torch_dtype=torch.float16,
@@ -16,7 +15,7 @@ model = AutoModelForImageTextToText.from_pretrained(
16
  trust_remote_code=True
17
  )
18
 
19
- # Apply your LoRA adapters from your Model Repo
20
  model = PeftModel.from_pretrained(model, LORA_ID)
21
  processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-2B-Instruct", trust_remote_code=True)
22
  print("Model Ready!")
@@ -48,7 +47,7 @@ def respond(message, image, history):
48
 
49
  generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0]
50
 
51
- # Extract only the latest assistant response
52
  if "assistant" in generated_text:
53
  response = generated_text.split("assistant")[-1].strip()
54
  else:
@@ -60,7 +59,8 @@ def respond(message, image, history):
60
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
61
  gr.Markdown("# 🎨 QuickDraw → tldraw JSON")
62
 
63
- chatbot = gr.Chatbot(height=500)
 
64
 
65
  with gr.Row():
66
  img_input = gr.Image(type="pil", label="Upload Sketch", scale=1)
 
4
  import torch
5
 
6
  # --- CONFIGURATION ---
7
+ BASE_MODEL = "unsloth/Qwen3-VL-2B-Instruct-unsloth-bnb-4bit"
8
+ LORA_ID = "EthanCastro/qwen3-vl-2b-quickdraw" # <--- Checked: Matches your repo name
9
 
10
  print("Loading model and processor...")
 
11
  model = AutoModelForImageTextToText.from_pretrained(
12
  BASE_MODEL,
13
  torch_dtype=torch.float16,
 
15
  trust_remote_code=True
16
  )
17
 
18
+ # Load your LoRA adapters
19
  model = PeftModel.from_pretrained(model, LORA_ID)
20
  processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-2B-Instruct", trust_remote_code=True)
21
  print("Model Ready!")
 
47
 
48
  generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0]
49
 
50
+ # Extract only the assistant's response
51
  if "assistant" in generated_text:
52
  response = generated_text.split("assistant")[-1].strip()
53
  else:
 
59
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
60
  gr.Markdown("# 🎨 QuickDraw → tldraw JSON")
61
 
62
+ # --- FIX IS HERE: type='tuples' supports your current history format ---
63
+ chatbot = gr.Chatbot(height=500, type="tuples")
64
 
65
  with gr.Row():
66
  img_input = gr.Image(type="pil", label="Upload Sketch", scale=1)