Update README.md
README.md
CHANGED
@@ -5,5 +5,75 @@ This is the repository for SophiaVL-R1-7B.
A simple inference example:
```python
import torch
from PIL import Image
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor

MODEL_PATH = "bunny127/SophiaVL-R1-7B"

# Example dataset entry that the image path and prompt below come from:
# {
#     "problem_id": 1,
#     "problem": "Subtract 0 cyan cubes. How many objects are left?",
#     "data_type": "image",
#     "problem_type": "numerical",
#     "options": [],
#     "process": "",
#     "solution": "<answer>5</answer>",
#     "path": "./Math/CLEVR-Math/images/CLEVR_train_036427.png",
#     "data_source": "CLEVR-Math"
# }
image_path = "/path/to/dataset/Math/CLEVR-Math/images/CLEVR_train_036427.png"
prompt = "Subtract 0 cyan cubes. How many objects are left?"
question_type = "numerical"

# attn_implementation="flash_attention_2" requires the flash-attn package;
# remove the argument to fall back to the default attention implementation.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_PATH,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)
processor = AutoProcessor.from_pretrained(MODEL_PATH)

SYS_PROMPT = """You FIRST think about the reasoning process as an internal monologue and then provide the final answer.
The reasoning process MUST BE enclosed within <think> </think> tags. The final answer MUST BE enclosed within <answer> </answer> tags, for example <think>your_thinking_process</think><answer>your_final_answer</answer>. If you use formula, please use LaTeX format."""

QUESTION_TEMPLATE = (
    "{Question}\n"
    "Please think about this question as if you were a human pondering deeply. "
    "Engage in an internal dialogue using expressions such as 'let me think', 'wait', 'Hmm', 'oh, I see', 'let's break it down', etc., or other natural language thought expressions. "
    "It's encouraged to include self-reflection or verification in the reasoning process. "
    "Provide your detailed reasoning between the <think> and </think> tags, and then give your final answer between the <answer> and </answer> tags."
)

TYPE_TEMPLATE = {
    "multiple choice": " Please provide only the single option letter (e.g., A, B, C, D, etc.) within the <answer> </answer> tags.",
    "numerical": " Please provide the numerical value (e.g., 42 or 3.14) within the <answer> </answer> tags.",
    "OCR": " Please transcribe text from the image/video clearly and provide your text answer within the <answer> </answer> tags.",
    "free-form": " Please provide your text answer within the <answer> </answer> tags."
}

def inference(image_path, question, problem_type="numerical", sys_prompt="You are a helpful assistant.", max_new_tokens=4096, return_input=False):
    image = Image.open(image_path)
    image_local_path = "file://" + image_path
    messages = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": [
            {"type": "text", "text": QUESTION_TEMPLATE.format(Question=question) + TYPE_TEMPLATE[problem_type]},
            {"image": image_local_path},
        ]},
    ]
    # Render the chat template to a prompt string; the image itself is passed
    # to the processor separately below.
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[text], images=[image], padding=True, return_tensors="pt")
    inputs = inputs.to("cuda")

    output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # Strip the prompt tokens so only the newly generated tokens are decoded.
    generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
    output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
    if return_input:
        return output_text[0], inputs
    return output_text[0]

response = inference(image_path, prompt, question_type, sys_prompt=SYS_PROMPT, max_new_tokens=2048)
print(response)
```
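
The decoded response contains the full `<think> ... </think><answer> ... </answer>` trace. If you only need the final answer, a small helper can pull it out of the tags; this is a minimal sketch, and `extract_answer` is an illustrative name rather than part of this repository:

```python
import re

def extract_answer(response: str) -> str:
    """Return the content of the last <answer>...</answer> pair, falling back to the raw response."""
    matches = re.findall(r"<answer>(.*?)</answer>", response, re.DOTALL)
    return matches[-1].strip() if matches else response.strip()

print(extract_answer(response))  # for the CLEVR-Math example above, the reference solution is "5"
```

The same helper works for the other `TYPE_TEMPLATE` question types, since each of them asks the model to place its answer inside the `<answer> </answer>` tags.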