roykim committed
Commit 7c2f196 · 1 Parent(s): 660a03f

Update README.md

Files changed (1)
  1. README.md +191 -1
README.md CHANGED
@@ -2,4 +2,194 @@
  license: apache-2.0
  language:
  - ko
- ---
+ library_name: transformers
+ pipeline_tag: conversational
+ tags:
+ - chatting
+ ---
+
+ This model is made for Korean chatting.
+
+ ```python
+ import os
+ import sys
+
+ import fire
+ # import gradio as gr
+ import torch
+ import transformers
+ from peft import PeftModel
+ from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer
+
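+ # NOTE: Iteratorize, Stream and Prompter are helper classes from the
+ # tloen/alpaca-lora project; its utils/ package is assumed to be available
+ # next to this script.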
+ from utils.callbacks import Iteratorize, Stream
+ from utils.prompter import Prompter
+
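+ # Prefer CUDA when available, then Apple Silicon (MPS), otherwise fall back to CPU.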
+ if torch.cuda.is_available():
+     device = "cuda"
+ else:
+     device = "cpu"
+
+ try:
+     if torch.backends.mps.is_available():
+         device = "mps"
+ except:  # noqa: E722
+     pass
+
+
+ def main(
+     load_8bit: bool = False,
+     base_model: str = "",
+     lora_weights: str = "tloen/alpaca-lora-7b",
+     prompt_template: str = "",  # The prompt template to use, will default to alpaca.
+     server_name: str = "0.0.0.0",  # Allows to listen on all interfaces by providing '0.0.0.0'.
+     share_gradio: bool = False,  # server_name/share_gradio are unused here since the gradio UI is commented out.
+ ):
+     base_model = base_model or os.environ.get("BASE_MODEL", "")
+     assert (
+         base_model
+     ), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
+
+     prompter = Prompter(prompt_template)
+     tokenizer = LlamaTokenizer.from_pretrained(base_model)
+     if device == "cuda":
+         model = LlamaForCausalLM.from_pretrained(
+             base_model,
+             load_in_8bit=load_8bit,
+             torch_dtype=torch.float16,
+             device_map="auto",
+         )
+         model = PeftModel.from_pretrained(
+             model,
+             lora_weights,
+             torch_dtype=torch.float16,
+         )
+     elif device == "mps":
+         model = LlamaForCausalLM.from_pretrained(
+             base_model,
+             device_map={"": device},
+             torch_dtype=torch.float16,
+         )
+         model = PeftModel.from_pretrained(
+             model,
+             lora_weights,
+             device_map={"": device},
+             torch_dtype=torch.float16,
+         )
+     else:
+         model = LlamaForCausalLM.from_pretrained(
+             base_model, device_map={"": device}, low_cpu_mem_usage=True
+         )
+         model = PeftModel.from_pretrained(
+             model,
+             lora_weights,
+             device_map={"": device},
+         )
+
+     # unwind broken decapoda-research config
+     model.config.pad_token_id = tokenizer.pad_token_id = 0  # unk
+     model.config.bos_token_id = 1
+     model.config.eos_token_id = 2
+
+     if not load_8bit:
+         model.half()  # seems to fix bugs for some users.
+
+     model.eval()
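+     # PyTorch 2.x can compile the model for faster inference; this is skipped on Windows.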
+     if torch.__version__ >= "2" and sys.platform != "win32":
+         model = torch.compile(model)
+
+     def evaluate(
+         instruction,
+         input=None,
+         temperature=0.1,
+         top_p=0.75,
+         top_k=40,
+         num_beams=4,
+         max_new_tokens=256,
+         repetition_penalty=4.8,
+         stream_output=False,
+         **kwargs,
+     ):
+         prompt = prompter.generate_prompt(instruction, input)
+         inputs = tokenizer(prompt, return_tensors="pt")
+         input_ids = inputs["input_ids"].to(device)
+         generation_config = GenerationConfig(
+             temperature=temperature,
+             top_p=top_p,
+             top_k=top_k,
+             num_beams=num_beams,
+             repetition_penalty=float(repetition_penalty),
+             **kwargs,
+         )
+
+         generate_params = {
+             "input_ids": input_ids,
+             "generation_config": generation_config,
+             "return_dict_in_generate": True,
+             "output_scores": True,
+             "max_new_tokens": max_new_tokens,
+         }
+
+         if stream_output:
+             # Stream the reply 1 token at a time.
+             # This is based on the trick of using 'stopping_criteria' to create an iterator,
+             # from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/text_generation.py#L216-L243.
+
+             def generate_with_callback(callback=None, **kwargs):
+                 kwargs.setdefault(
+                     "stopping_criteria", transformers.StoppingCriteriaList()
+                 )
+                 kwargs["stopping_criteria"].append(
+                     Stream(callback_func=callback)
+                 )
+                 with torch.no_grad():
+                     model.generate(**kwargs)
+
+             def generate_with_streaming(**kwargs):
+                 return Iteratorize(
+                     generate_with_callback, kwargs, callback=None
+                 )
+
+             with generate_with_streaming(**generate_params) as generator:
+                 for output in generator:
+                     # new_tokens = len(output) - len(input_ids[0])
+                     decoded_output = tokenizer.decode(output)
+
+                     if output[-1] in [tokenizer.eos_token_id]:
+                         break
+
+                     yield prompter.get_response(decoded_output)
+             return  # early return for stream_output
+
+         # Without streaming
+         with torch.no_grad():
+             generation_output = model.generate(
+                 input_ids=input_ids,
+                 generation_config=generation_config,
+                 return_dict_in_generate=True,
+                 output_scores=True,
+                 max_new_tokens=max_new_tokens,
+             )
+         s = generation_output.sequences[0]
+         output = tokenizer.decode(s)
+         yield prompter.get_response(output)
+     # testing code for readme
+     for instruction in [
+         "Tell me about alpacas.",
+         "Tell me about the president of Mexico in 2019.",
+         "Tell me about the king of France in 2019.",
+         "List all Canadian provinces in alphabetical order.",
+         "Write a Python program that prints the first 10 Fibonacci numbers.",
+         "Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. For numbers which are multiples of both three and five print 'FizzBuzz'.",  # noqa: E501
+         "Tell me five words that rhyme with 'shock'.",
+         "Translate the sentence 'I have no mouth but I must scream' into Spanish.",
+         "Count up from 1 to 500.",
+     ]:
+         print("Instruction:", instruction)
+         # evaluate() is a generator, so iterate to print the response it yields.
+         for response in evaluate(instruction):
+             print("Response:", response)
+         print()
+
+
+ if __name__ == "__main__":
+     fire.Fire(main)
+
+ ```
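+
+ For reference, a minimal sketch of loading and querying the adapter directly with `peft`, without the full script above, could look like the following. The `BASE_MODEL` and `LORA_WEIGHTS` values are placeholders rather than confirmed repository IDs, and the alpaca-style prompt is only an assumption about the training template:
+
+ ```python
+ import torch
+ from peft import PeftModel
+ from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer
+
+ BASE_MODEL = "huggyllama/llama-7b"    # placeholder: the LLaMA base model
+ LORA_WEIGHTS = "tloen/alpaca-lora-7b"  # placeholder: point at this repo's LoRA adapter
+
+ tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)
+ model = LlamaForCausalLM.from_pretrained(
+     BASE_MODEL, torch_dtype=torch.float16, device_map="auto"
+ )
+ model = PeftModel.from_pretrained(model, LORA_WEIGHTS, torch_dtype=torch.float16)
+ model.eval()
+
+ # Plain alpaca-style prompt; the Prompter template used in training may differ.
+ prompt = "### Instruction:\n안녕하세요, 오늘 기분이 어때요?\n\n### Response:\n"
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ with torch.no_grad():
+     output_ids = model.generate(
+         **inputs,
+         generation_config=GenerationConfig(num_beams=4, repetition_penalty=4.8),
+         max_new_tokens=128,
+     )
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
+ ```
+
+ For real use, prefer the full script above, which applies the project's Prompter template and supports token streaming.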