Joshi-Aryan committed on
Commit
e847b28
verified
1 Parent(s): 6d72fc7

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +16 -4
README.md CHANGED
@@ -1,13 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
1
  #Usage
2
 
3
  <pre>
4
-
5
- # pip install transformers accelerate
6
  from transformers import AutoTokenizer
7
  import transformers
8
  import torch
9
 
10
- model = "mlabonne/llama-2-7b-miniguanaco"
11
  prompt = "What is a large language model?"
12
 
13
  tokenizer = AutoTokenizer.from_pretrained(model)
@@ -17,7 +28,7 @@ pipeline = transformers.pipeline(
17
  torch_dtype=torch.float16,
18
  device_map="auto",
19
  )
20
-
21
  sequences = pipeline(
22
  f'<s>[INST] {prompt} [/INST]',
23
  do_sample=True,
@@ -29,4 +40,5 @@ sequences = pipeline(
29
  for seq in sequences:
30
  print(f"Result: {seq['generated_text']}")
31
 
 
32
  </pre>
 
1
+ ---
2
+ license: mit
3
+ datasets:
4
+ - Joshi-Aryan/chat_test
5
+ language:
6
+ - en
7
+ library_name: transformers
8
+ pipeline_tag: question-answering
9
+ tags:
10
+ - chat
11
+ ---
12
  # Usage
13
 
14
  <pre>
15
+
16
+ # pip install transformers accelerate
17
  from transformers import AutoTokenizer
18
  import transformers
19
  import torch
20
 
21
+ model = "Joshi-Aryan/llama-2-7b-miniguanaco"
22
  prompt = "What is a large language model?"
23
 
24
  tokenizer = AutoTokenizer.from_pretrained(model)
 
28
  torch_dtype=torch.float16,
29
  device_map="auto",
30
  )
31
+
32
  sequences = pipeline(
33
  f'<s>[INST] {prompt} [/INST]',
34
  do_sample=True,
 
40
  for seq in sequences:
41
  print(f"Result: {seq['generated_text']}")
42
 
43
+
44
  </pre>