Commit e8e1802
Parent(s): f644c7c
Create README.md
README.md ADDED
@@ -0,0 +1,81 @@
---
datasets:
- Open-Orca/OpenOrca
language:
- en
library_name: transformers
---
# Test task

For model inference, run the following:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, LlamaTokenizer

# Fix the random seed so generations are reproducible
seed_value = 42
torch.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)

device = 'cuda' if torch.cuda.is_available() else 'cpu'

model_name = "lmsys/vicuna-7b-v1.5"
lora_name = 'AlexWortega/PaltaTest'

tokenizer = LlamaTokenizer.from_pretrained(model_name, model_max_length=1024)
tokenizer.pad_token = tokenizer.eos_token

# Load the base model first, then attach the LoRA adapter on top of it
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(model, lora_name, torch_dtype=torch.float16)
model.to(device)
model.eval()


def process_output(i, o):
    """
    Simple output processing: keep only the text after the 'A:' marker.
    """
    if isinstance(o, list):
        return [seq.split('A:')[1] for seq in o]
    elif isinstance(o, str):
        return o.split('A:')[1]
    else:
        return "Unsupported data type. Please provide a list or a string."


def generate_seqs(q, k=2):
    # Wrap the question in the 'Q: ... A:' prompt format expected by process_output
    q = 'Q:' + q + 'A:'
    tokens = tokenizer.encode(q, return_tensors='pt').to(device)
    g = model.generate(input_ids=tokens)
    generated_sequences = tokenizer.batch_decode(g, skip_special_tokens=True)

    return generated_sequences


q = """Given a weather description in plain text, rewrite it in a different style

The weather is sunny and the temperature is 20 degrees. The wind is blowing at 10 km/h.
Citizens are advised to go out and enjoy the weather. The weather is expected to be sunny tomorrow.

And the following style: "Angry weatherman"
"""

s = generate_seqs(q=q)
s = process_output(q, s)
print(s[0])  # should output something like this:
"""
Angry weatherman: "The weather is sunny and the temperature is 20 degrees. The wind is blowing at 10 km/h.
Citizens are advised to stay indoors and avoid going out. The weather is expected to be sunny tomorrow.
"""
```
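
If you want explicit control over decoding (output length, sampling temperature, and so on), a `GenerationConfig` can be passed to `model.generate`. The sketch below reuses the `model`, `tokenizer`, and `device` defined above; the specific sampling values and the example prompt are illustrative assumptions, not settings recommended for this adapter.

```python
from transformers import GenerationConfig

# Illustrative decoding parameters (assumed placeholders, tune for your use case)
gen_config = GenerationConfig(
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    pad_token_id=tokenizer.eos_token_id,
)

# Same 'Q: ... A:' prompt format as in the snippet above
prompt = 'Q:' + "Summarize the weather report in one sentence." + 'A:'
tokens = tokenizer.encode(prompt, return_tensors='pt').to(device)
g = model.generate(input_ids=tokens, generation_config=gen_config)
print(tokenizer.batch_decode(g, skip_special_tokens=True)[0])
```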