PaDT-MLLM committed on
Commit
7391dfa
·
verified ·
1 Parent(s): 5af791d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -5
README.md CHANGED
@@ -3,11 +3,9 @@ license: apache-2.0
3
  language:
4
  - en
5
  - zh
6
- base_model:
7
- - Qwen/Qwen2.5-VL-3B-Instruct
8
- - Qwen/Qwen2.5-VL-7B-Instruct
9
  library_name: transformers
10
- pipeline_tag: any-to-any
 
11
  ---
12
 
13
  <div align='center'><h1>Patch-as-Decodable-Token: Towards Unified Multi-Modal Vision Tasks in MLLMs</h1></div>
@@ -97,7 +95,7 @@ processor = VisonTextProcessingClass(processor, model.config.vision_config.spati
97
  processor.prepare(model.model.embed_tokens.weight.shape[0])
98
 
99
  # question prompt
100
- PROMPT = "Please describe this image."
101
 
102
  # construct conversation
103
  message = [
 
3
  language:
4
  - en
5
  - zh
 
 
 
6
  library_name: transformers
7
+ task_categories: [object-detection, image-segmentation]
8
+ tags: [mllm, multimodal, vision-language-model, referring-expression-comprehension, computer-vision]
9
  ---
10
 
11
  <div align='center'><h1>Patch-as-Decodable-Token: Towards Unified Multi-Modal Vision Tasks in MLLMs</h1></div>
 
95
  processor.prepare(model.model.embed_tokens.weight.shape[0])
96
 
97
  # question prompt
98
+ PROMPT = """Please carefully check the image and detect the object this sentence describes: "The car is on the left side of the horse"."""
99
 
100
  # construct conversation
101
  message = [