prithivMLmods committed on
Commit
611efc9
·
verified ·
1 Parent(s): 2382bf9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -136,7 +136,7 @@ if torch.cuda.is_available():
136
 
137
  print("Using device:", device)
138
 
139
- MODEL_ID_X = "nota-ai/ERGO-7B"
140
  processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True, use_fast=False)
141
  model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(
142
  MODEL_ID_X,
@@ -179,7 +179,7 @@ model_v4 = AutoModel.from_pretrained(
179
  tokenizer_v4 = AutoTokenizer.from_pretrained(MODEL_ID_V4, trust_remote_code=True, use_fast=False)
180
 
181
  MODELS = {
182
- "ERGO-7B": (processor_x, model_x),
183
  "Typhoon-OCR-3B": (processor_t, model_t),
184
  "olmOCR-7B-0225-preview": (processor_o, model_o),
185
  "Lumian-VLR-7B-Thinking": (processor_j, model_j),
@@ -357,7 +357,7 @@ with gr.Blocks(theme=light_salmon_theme, css=css) as demo:
357
  ])
358
 
359
  model_choice = gr.Radio(
360
- choices=["Lumian-VLR-7B-Thinking", "ERGO-7B", "openbmb/MiniCPM-V-4", "Typhoon-OCR-3B", "olmOCR-7B-0225-preview"],
361
  label="Select Model",
362
  value="Lumian-VLR-7B-Thinking"
363
  )
 
136
 
137
  print("Using device:", device)
138
 
139
+ MODEL_ID_X = "Senqiao/VisionThink-Efficient"
140
  processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True, use_fast=False)
141
  model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(
142
  MODEL_ID_X,
 
179
  tokenizer_v4 = AutoTokenizer.from_pretrained(MODEL_ID_V4, trust_remote_code=True, use_fast=False)
180
 
181
  MODELS = {
182
+ "VisionThink-Efficient": (processor_x, model_x),
183
  "Typhoon-OCR-3B": (processor_t, model_t),
184
  "olmOCR-7B-0225-preview": (processor_o, model_o),
185
  "Lumian-VLR-7B-Thinking": (processor_j, model_j),
 
357
  ])
358
 
359
  model_choice = gr.Radio(
360
+ choices=["Lumian-VLR-7B-Thinking", "VisionThink-Efficient", "openbmb/MiniCPM-V-4", "Typhoon-OCR-3B", "olmOCR-7B-0225-preview"],
361
  label="Select Model",
362
  value="Lumian-VLR-7B-Thinking"
363
  )