GenVRadmin committed (verified)
Commit bc753a8 · 1 parent: 5ed8781

Update README.md

Files changed (1): README.md (+36, -1)

README.md CHANGED
@@ -35,4 +35,39 @@ AryaBhatta-GemmaOrca 35.9 72.26 53.85 40.35 50.59 \
  zephyr-7b-beta 37.52 71.77 55.26 39.77 51.08 \
  zephyr-7b-gemma-v0.1 34.22 66.37 52.19 37.10 47.47 \
  mlabonne/Gemmalpaca-7B 21.6 40.87 44.85 30.49 34.45 \
- google/gemma-7b-it 21.33 40.84 41.70 30.25 33.53
+ google/gemma-7b-it 21.33 40.84 41.70 30.25 33.53
+
+
+
+ How to use:
+
+ from peft import AutoPeftModelForCausalLM
+ from transformers import AutoTokenizer
+
+ model = AutoPeftModelForCausalLM.from_pretrained(
+     "GenVRadmin/AryaBhatta-GemmaOrca",
+     load_in_4bit = False,
+     token = hf_token
+ )
+ tokenizer = AutoTokenizer.from_pretrained("GenVRadmin/AryaBhatta-GemmaOrca")
+
+ input_prompt = """
+ ### Instruction:
+ {}
+
+ ### Input:
+ {}
+
+ ### Response:
+ {}"""
+
+ input_text = input_prompt.format(
+     "Answer this question about India.",  # instruction
+     "Who is the Prime Minister of India?",  # input
+     "",  # output - leave this blank for generation!
+ )
+
+ inputs = tokenizer([input_text], return_tensors = "pt").to("cuda")
+
+ outputs = model.generate(**inputs, max_new_tokens = 300, use_cache = True)
+ response = tokenizer.batch_decode(outputs)[0]