Update README.md
Browse files
README.md
CHANGED
|
@@ -2,7 +2,7 @@
|
|
| 2 |
license: mit
|
| 3 |
language:
|
| 4 |
- en
|
| 5 |
-
pipeline_tag:
|
| 6 |
---
|
| 7 |
# git_20
|
| 8 |
|
|
@@ -13,7 +13,7 @@ This model is fine-tuned from LLaMA on 8 Nvidia A100-80G GPUs using 3,000,000
|
|
| 13 |
```python
|
| 14 |
from transformers import AutoModelForCausalLM
|
| 15 |
from transformers import AutoProcessor
|
| 16 |
-
|
| 17 |
model = AutoModelForCausalLM.from_pretrained("Fan21/git_20")
|
| 18 |
processor = AutoProcessor.from_pretrained("Fan21/git_20")
|
| 19 |
|
|
@@ -26,4 +26,4 @@ with torch.no_grad():
|
|
| 26 |
outputs = model.generate(pixel_values=pixel_values, max_length=50)
|
| 27 |
|
| 28 |
answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 29 |
-
```
|
|
|
|
| 2 |
license: mit
|
| 3 |
language:
|
| 4 |
- en
|
| 5 |
+
pipeline_tag: image-to-text
|
| 6 |
---
|
| 7 |
# git_20
|
| 8 |
|
|
|
|
| 13 |
```python
|
| 14 |
from transformers import AutoModelForCausalLM
|
| 15 |
from transformers import AutoProcessor
|
| 16 |
+
from PIL import Image
|
| 17 |
model = AutoModelForCausalLM.from_pretrained("Fan21/git_20")
|
| 18 |
processor = AutoProcessor.from_pretrained("Fan21/git_20")
|
| 19 |
|
|
|
|
| 26 |
outputs = model.generate(pixel_values=pixel_values, max_length=50)
|
| 27 |
|
| 28 |
answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 29 |
+
```
|