hongxingli committed on
Commit
32dc991
·
verified ·
1 Parent(s): d4e53d8

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -43,7 +43,7 @@ from qwen_vl_utils import process_vision_info
43
 
44
  # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
45
  model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
46
- "inclusionAI/GUI-G2-7B",
47
  torch_dtype=torch.bfloat16,
48
  attn_implementation="flash_attention_2",
49
  device_map="auto")
 
43
 
44
  # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
45
  model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
46
+ "hongxingli/SpatialLadder-3B",
47
  torch_dtype=torch.bfloat16,
48
  attn_implementation="flash_attention_2",
49
  device_map="auto")