pharaouk committed on
Commit d251ee1 · 1 Parent(s): bafeff7

Update README.md

Files changed (1):
  1. README.md +34 -0
README.md CHANGED
@@ -2,7 +2,41 @@
 
 **INFERENCE**
 
+ ```
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
 
+ torch.set_default_device('cuda')
+ system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
+ system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
+
+ def generate_prompt(instruction, input=None):
+     if input:
+         prompt = f"### System:\n{system_prompt}\n\n"
+     else:
+         prompt = f"### System:\n{system_no_input_prompt}\n\n"
+     prompt += f"### Instruction:\n{instruction}\n\n"
+     if input:
+         prompt += f"### Input:\n{input}\n\n"
+     return prompt + """### Response:\n"""
+
+
+ device = "cuda"
+
+ model = AutoModelForCausalLM.from_pretrained("SkunkworksAI/Mistralic-7B-1")
+ tokenizer = AutoTokenizer.from_pretrained("SkunkworksAI/Mistralic-7B-1")
+ tokenizer.eos_token = "<|im_end|>"
+
+
+ while True:
+     instruction = input("Enter Instruction: ")
+     instruction = generate_prompt(instruction)
+     inputs = tokenizer(instruction, return_tensors="pt", return_attention_mask=False)
+
+     outputs = model.generate(**inputs, max_length=1000, do_sample=True, temperature=0.01, use_cache=True, eos_token_id=tokenizer.eos_token_id)
+     text = tokenizer.batch_decode(outputs)[0]
+     print(text)
+ ```

 **EVALUATION**
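
For reference, here is a minimal sketch of the prompt layout that the `generate_prompt` helper in the snippet above assembles when no extra input is passed. The sample instruction text is illustrative only and is not part of the commit.

```python
# Minimal sketch: reproduces the prompt layout built by generate_prompt above
# when called without an `input` argument. The sample instruction is illustrative.
system_no_input_prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
)

def generate_prompt(instruction, input=None):
    prompt = f"### System:\n{system_no_input_prompt}\n\n"
    prompt += f"### Instruction:\n{instruction}\n\n"
    if input:
        prompt += f"### Input:\n{input}\n\n"
    return prompt + "### Response:\n"

# The model receives a single string with ### System / ### Instruction / ### Response sections.
print(generate_prompt("Explain what a tokenizer does in one sentence."))
```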