Update README.md
#4
by
Forgege
- opened
README.md
CHANGED
|
@@ -34,6 +34,8 @@ DeepSeek-VL2-tiny is built on DeepSeekMoE-3B (total activated parameters are 1.0B)
|
|
| 34 |
On the basis of `Python >= 3.8` environment, install the necessary dependencies by running the following command:
|
| 35 |
|
| 36 |
```shell
|
|
|
|
|
|
|
| 37 |
pip install -e .
|
| 38 |
```
|
| 39 |
|
|
@@ -48,12 +50,12 @@ pip install -e .
|
|
| 48 |
import torch
|
| 49 |
from transformers import AutoModelForCausalLM
|
| 50 |
|
| 51 |
-
from deepseek_vl.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
|
| 52 |
-
from deepseek_vl.utils.io import load_pil_images
|
| 53 |
|
| 54 |
|
| 55 |
# specify the path to the model
|
| 56 |
-
model_path = "deepseek-ai/deepseek-vl2-small"
|
| 57 |
vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
|
| 58 |
tokenizer = vl_chat_processor.tokenizer
|
| 59 |
|
|
|
|
| 34 |
On the basis of `Python >= 3.8` environment, install the necessary dependencies by running the following command:
|
| 35 |
|
| 36 |
```shell
|
| 37 |
+
git clone https://github.com/deepseek-ai/DeepSeek-VL2
|
| 38 |
+
cd DeepSeek-VL2
|
| 39 |
pip install -e .
|
| 40 |
```
|
| 41 |
|
|
|
|
| 50 |
import torch
|
| 51 |
from transformers import AutoModelForCausalLM
|
| 52 |
|
| 53 |
+
from deepseek_vl2.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
|
| 54 |
+
from deepseek_vl2.utils.io import load_pil_images
|
| 55 |
|
| 56 |
|
| 57 |
# specify the path to the model
|
| 58 |
+
model_path = "deepseek-ai/deepseek-vl2-tiny"
|
| 59 |
vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
|
| 60 |
tokenizer = vl_chat_processor.tokenizer
|
| 61 |
|