# 🦾 Mistralic-7B-1 🦾
Special thanks to Together Compute for sponsoring Skunkworks with compute!
**INFERENCE**
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Place all newly created tensors (and the model) on the GPU.
torch.set_default_device('cuda')

system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"

def generate_prompt(instruction, input=None):
    # Pick the system prompt based on whether optional input context is supplied.
    if input:
        prompt = f"### System:\n{system_prompt}\n\n"
    else:
        prompt = f"### System:\n{system_no_input_prompt}\n\n"
    prompt += f"### Instruction:\n{instruction}\n\n"
    if input:
        prompt += f"### Input:\n{input}\n\n"
    return prompt + "### Response:\n"

model = AutoModelForCausalLM.from_pretrained("SkunkworksAI/Mistralic-7B-1")
tokenizer = AutoTokenizer.from_pretrained("SkunkworksAI/Mistralic-7B-1")

# Simple interactive loop: read an instruction, wrap it in the prompt
# template, and generate a near-deterministic completion.
while True:
    instruction = input("Enter Instruction: ")
    prompt = generate_prompt(instruction)
    inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False)
    outputs = model.generate(
        **inputs,
        max_length=1000,
        do_sample=True,
        temperature=0.01,
        use_cache=True,
        eos_token_id=tokenizer.eos_token_id,
    )
    text = tokenizer.batch_decode(outputs)[0]
    print(text)
```
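The interactive loop above never exercises the optional `input` argument of `generate_prompt`. For reference, here is a minimal sketch of a prompt built with input context; the instruction and input strings are purely illustrative, not from the model card:

```python
# Illustrative only: hypothetical instruction and input text, used to show
# the full prompt template that generate_prompt produces.
prompt = generate_prompt(
    "Summarize the passage in one sentence.",                # hypothetical instruction
    "Mistral-7B is a 7-billion-parameter language model.",   # hypothetical input context
)
print(prompt)
# ### System:
# Below is an instruction that describes a task, paired with an input that
# provides further context. Write a response that appropriately completes
# the request.
#
# ### Instruction:
# Summarize the passage in one sentence.
#
# ### Input:
# Mistral-7B is a 7-billion-parameter language model.
#
# ### Response:
```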
**EVALUATION**

Average score: **0.72157**

For comparison:

| Model | Average score |
|---|---|
| mistralai/Mistral-7B-v0.1 | 0.7116 |
| mistralai/Mistral-7B-Instruct-v0.1 | 0.6794 |