Update README.md
Browse files
README.md
CHANGED
|
@@ -1,20 +1,18 @@
|
|
Previous version of the README.md frontmatter:

---
datasets:
- liuhaotian/LLaVA-Pretrain
- liuhaotian/LLaVA-Instruct-150K
language:
- en
tags:
- llava
- phi
license: mit
library_name: transformers
base_model: visheratin/MC-LLaVA-3b
widget:
- text:
  src:
- text:
  src:
---
|
| 19 |
|
| 20 |
# HelpingAI-Vision
|
|
@@ -122,4 +120,4 @@ streamer = TextStreamer(tokenizer)
|
|
| 122 |
with torch.inference_mode():
|
| 123 |
output = model.generate(**inputs, max_new_tokens=200, do_sample=True, top_p=0.9, temperature=1.2, eos_token_id=tokenizer.eos_token_id, streamer=streamer)
|
| 124 |
print(tokenizer.decode(output[0]).replace(prompt, "").replace("<|im_end|>", ""))
|
| 125 |
-
```
|
|
|
|
Updated version of the README.md frontmatter:

---
language:
- en
tags:
- llava
- phi
- HelpingAI
license: mit
library_name: transformers
base_model: visheratin/MC-LLaVA-3b
widget:
- text: What animal is it?
  src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
- text: Where is it?
  src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
---
|
| 17 |
|
| 18 |
# HelpingAI-Vision
|
|
|
|
| 120 |
with torch.inference_mode():
|
| 121 |
output = model.generate(**inputs, max_new_tokens=200, do_sample=True, top_p=0.9, temperature=1.2, eos_token_id=tokenizer.eos_token_id, streamer=streamer)
|
| 122 |
print(tokenizer.decode(output[0]).replace(prompt, "").replace("<|im_end|>", ""))
|
| 123 |
+
```
|