Add README.md, adapter_config.
- README.md +32 -0
- adapter_config.json +37 -0
- adapter_model.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,32 @@
---
base_model: llava-hf/llava-1.5-7b-hf
library_name: peft
---
## Usage

```
import torch
from PIL import Image
from transformers import AutoProcessor, BitsAndBytesConfig, LlavaForConditionalGeneration

PROCESSOR_ID = "llava-hf/llava-1.5-7b-hf"
processor = AutoProcessor.from_pretrained(PROCESSOR_ID)
processor.tokenizer.padding_side = "left"

# Example 4-bit quantization setup; adjust (or drop) to match your hardware.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

MODEL_ID = "metchee/persrv"
tuned_model = LlavaForConditionalGeneration.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16,
    quantization_config=quantization_config,
)

# Placeholder inputs; replace with your own image, prompt, and output budget.
image_path = "example.jpg"
prompt = "USER: <image>\nDescribe this image. ASSISTANT:"
MAX_LENGTH = 128

image = Image.open(image_path).convert("RGB")
inputs = processor(text=prompt, images=[image], return_tensors="pt").to("cuda")
tuned_generated_ids = tuned_model.generate(**inputs, max_new_tokens=MAX_LENGTH)
generated_texts = processor.batch_decode(tuned_generated_ids, skip_special_tokens=True)
```
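Since this repo stores a PEFT LoRA adapter (see adapter_config.json below), the adapter can also be attached to the base model explicitly via peft instead of relying on transformers resolving the adapter repo on its own. A minimal sketch, assuming the fp16 base weights fit in memory:

```
import torch
from peft import PeftModel
from transformers import LlavaForConditionalGeneration

# Load the base model, then attach this repo's LoRA adapter on top of it.
base_model = LlavaForConditionalGeneration.from_pretrained(
    "llava-hf/llava-1.5-7b-hf",
    torch_dtype=torch.float16,
)
tuned_model = PeftModel.from_pretrained(base_model, "metchee/persrv")

# Optionally fold the LoRA weights into the base model for faster inference.
tuned_model = tuned_model.merge_and_unload()
```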
### Framework versions

- PEFT 0.12.0
- Transformers 4.41.2
- bitsandbytes 0.43.3
adapter_config.json
ADDED
@@ -0,0 +1,37 @@
{
  "alpha_pattern": {},
  "auto_mapping": {
    "base_model_class": "LlavaForConditionalGeneration",
    "parent_library": "transformers.models.llava.modeling_llava"
  },
  "base_model_name_or_path": "llava-hf/llava-1.5-7b-hf",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": "gaussian",
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 8,
  "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "o_proj",
    "down_proj",
    "k_proj",
    "up_proj",
    "v_proj",
    "q_proj",
    "gate_proj"
  ],
  "task_type": null,
  "use_dora": false,
  "use_rslora": false
}
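For reference, a LoraConfig along these lines would reproduce the settings above. This is a reconstruction from the JSON, not the actual training script, so treat it as a hypothetical sketch:

```
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,
    lora_alpha=8,
    lora_dropout=0.1,
    init_lora_weights="gaussian",
    bias="none",
    # All attention and MLP projection matrices of the language model.
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```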
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0698d22585202efaf49c8fc5cf1030bed577d51dc0b4c0aa7d12e224ceb94f14
size 84761704
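The file above is a Git LFS pointer; cloning with git lfs or downloading through huggingface_hub resolves it to the actual adapter weights (~85 MB per the size field). A small sketch:

```
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer to the real safetensors file and returns its local path.
adapter_path = hf_hub_download(
    repo_id="metchee/persrv",
    filename="adapter_model.safetensors",
)
```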