naveensp committed
Commit 4e9d1c9 · verified
1 parent: 98a4f1b

Delete conversation.py with huggingface_hub
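
The commit message refers to the huggingface_hub client. For illustration only, a file deletion like this one can be reproduced with the client's HfApi.delete_file; the repo id below is a placeholder, not taken from this page.

from huggingface_hub import HfApi

api = HfApi()
api.delete_file(
    path_in_repo="conversation.py",
    repo_id="your-namespace/your-repo",  # placeholder; the actual repo id is not shown on this page
    commit_message="Delete conversation.py with huggingface_hub",
)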

Files changed (1)
conversation.py +0 -396
conversation.py DELETED
@@ -1,396 +0,0 @@
- import dataclasses
- from enum import auto, Enum
- from typing import List, Tuple
- import base64
- from io import BytesIO
- from PIL import Image
-
-
- class SeparatorStyle(Enum):
-     """Different separator style."""
-     SINGLE = auto()
-     TWO = auto()
-     MPT = auto()
-     PLAIN = auto()
-     LLAMA_2 = auto()
-
-
- @dataclasses.dataclass
- class Conversation:
-     """A class that keeps all conversation history."""
-     system: str
-     roles: List[str]
-     messages: List[List[str]]
-     offset: int
-     sep_style: SeparatorStyle = SeparatorStyle.SINGLE
-     sep: str = "###"
-     sep2: str = None
-     version: str = "Unknown"
-
-     skip_next: bool = False
-
-     def get_prompt(self):
-         messages = self.messages
-         if len(messages) > 0 and type(messages[0][1]) is tuple:
-             messages = self.messages.copy()
-             init_role, init_msg = messages[0].copy()
-             init_msg = init_msg[0].replace("<image>", "").strip()
-             if 'mmtag' in self.version:
-                 messages[0] = (init_role, init_msg)
-                 messages.insert(0, (self.roles[0], "<Image><image></Image>"))
-                 messages.insert(1, (self.roles[1], "Received."))
-             else:
-                 messages[0] = (init_role, "<image>\n" + init_msg)
-
-         if self.sep_style == SeparatorStyle.SINGLE:
-             ret = self.system + self.sep
-             for role, message in messages:
-                 if message:
-                     if type(message) is tuple:
-                         message, _, _ = message
-                     ret += role + ": " + message + self.sep
-                 else:
-                     ret += role + ":"
-         elif self.sep_style == SeparatorStyle.TWO:
-             seps = [self.sep, self.sep2]
-             ret = self.system + seps[0]
-             for i, (role, message) in enumerate(messages):
-                 if message:
-                     if type(message) is tuple:
-                         message, _, _ = message
-                     ret += role + ": " + message + seps[i % 2]
-                 else:
-                     ret += role + ":"
-         elif self.sep_style == SeparatorStyle.MPT:
-             ret = self.system + self.sep
-             for role, message in messages:
-                 if message:
-                     if type(message) is tuple:
-                         message, _, _ = message
-                     ret += role + message + self.sep
-                 else:
-                     ret += role
-         elif self.sep_style == SeparatorStyle.LLAMA_2:
-             wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" if len(msg) > 0 else msg
-             wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
-             ret = ""
-
-             for i, (role, message) in enumerate(messages):
-                 if i == 0:
-                     assert message, "first message should not be none"
-                     assert role == self.roles[0], "first message should come from user"
-                 if message:
-                     if type(message) is tuple:
-                         message, _, _ = message
-                     if i == 0: message = wrap_sys(self.system) + message
-                     if i % 2 == 0:
-                         message = wrap_inst(message)
-                         ret += self.sep + message
-                     else:
-                         ret += " " + message + " " + self.sep2
-                 else:
-                     ret += ""
-             ret = ret.lstrip(self.sep)
-         elif self.sep_style == SeparatorStyle.PLAIN:
-             seps = [self.sep, self.sep2]
-             ret = self.system
-             for i, (role, message) in enumerate(messages):
-                 if message:
-                     if type(message) is tuple:
-                         message, _, _ = message
-                     ret += message + seps[i % 2]
-                 else:
-                     ret += ""
-         else:
-             raise ValueError(f"Invalid style: {self.sep_style}")
-
-         return ret
-
-     def append_message(self, role, message):
-         self.messages.append([role, message])
-
-     def process_image(self, image, image_process_mode, return_pil=False, image_format='PNG', max_len=1344, min_len=672):
-         if image_process_mode == "Pad":
-             def expand2square(pil_img, background_color=(122, 116, 104)):
-                 width, height = pil_img.size
-                 if width == height:
-                     return pil_img
-                 elif width > height:
-                     result = Image.new(pil_img.mode, (width, width), background_color)
-                     result.paste(pil_img, (0, (width - height) // 2))
-                     return result
-                 else:
-                     result = Image.new(pil_img.mode, (height, height), background_color)
-                     result.paste(pil_img, ((height - width) // 2, 0))
-                     return result
-             image = expand2square(image)
-         elif image_process_mode in ["Default", "Crop"]:
-             pass
-         elif image_process_mode == "Resize":
-             image = image.resize((336, 336))
-         else:
-             raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
-         if max(image.size) > max_len:
-             max_hw, min_hw = max(image.size), min(image.size)
-             aspect_ratio = max_hw / min_hw
-             shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
-             longest_edge = int(shortest_edge * aspect_ratio)
-             W, H = image.size
-             if H > W:
-                 H, W = longest_edge, shortest_edge
-             else:
-                 H, W = shortest_edge, longest_edge
-             image = image.resize((W, H))
-         if return_pil:
-             return image
-         else:
-             buffered = BytesIO()
-             image.save(buffered, format=image_format)
-             img_b64_str = base64.b64encode(buffered.getvalue()).decode()
-             return img_b64_str
-
-     def get_images(self, return_pil=False):
-         images = []
-         for i, (role, msg) in enumerate(self.messages[self.offset:]):
-             if i % 2 == 0:
-                 if type(msg) is tuple:
-                     msg, image, image_process_mode = msg
-                     image = self.process_image(image, image_process_mode, return_pil=return_pil)
-                     images.append(image)
-         return images
-
-     def to_gradio_chatbot(self):
-         ret = []
-         for i, (role, msg) in enumerate(self.messages[self.offset:]):
-             if i % 2 == 0:
-                 if type(msg) is tuple:
-                     msg, image, image_process_mode = msg
-                     img_b64_str = self.process_image(
-                         image, "Default", return_pil=False,
-                         image_format='JPEG')
-                     img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
-                     msg = img_str + msg.replace('<image>', '').strip()
-                     ret.append([msg, None])
-                 else:
-                     ret.append([msg, None])
-             else:
-                 ret[-1][-1] = msg
-         return ret
-
-     def copy(self):
-         return Conversation(
-             system=self.system,
-             roles=self.roles,
-             messages=[[x, y] for x, y in self.messages],
-             offset=self.offset,
-             sep_style=self.sep_style,
-             sep=self.sep,
-             sep2=self.sep2,
-             version=self.version)
-
-     def dict(self):
-         if len(self.get_images()) > 0:
-             return {
-                 "system": self.system,
-                 "roles": self.roles,
-                 "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
-                 "offset": self.offset,
-                 "sep": self.sep,
-                 "sep2": self.sep2,
-             }
-         return {
-             "system": self.system,
-             "roles": self.roles,
-             "messages": self.messages,
-             "offset": self.offset,
-             "sep": self.sep,
-             "sep2": self.sep2,
-         }
-
-
- conv_vicuna_v0 = Conversation(
-     system="A chat between a curious human and an artificial intelligence assistant. "
-            "The assistant gives helpful, detailed, and polite answers to the human's questions.",
-     roles=("Human", "Assistant"),
-     messages=(
-         ("Human", "What are the key differences between renewable and non-renewable energy sources?"),
-         ("Assistant",
-          "Renewable energy sources are those that can be replenished naturally in a relatively "
-          "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
-          "Non-renewable energy sources, on the other hand, are finite and will eventually be "
-          "depleted, such as coal, oil, and natural gas. Here are some key differences between "
-          "renewable and non-renewable energy sources:\n"
-          "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
-          "energy sources are finite and will eventually run out.\n"
-          "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
-          "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
-          "and other negative effects.\n"
-          "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
-          "have lower operational costs than non-renewable sources.\n"
-          "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
-          "locations than non-renewable sources.\n"
-          "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
-          "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
-          "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
-          "non-renewable sources are not, and their depletion can lead to economic and social instability.\n")
-     ),
-     offset=2,
-     sep_style=SeparatorStyle.SINGLE,
-     sep="###",
- )
-
- conv_vicuna_v1 = Conversation(
-     system="A chat between a curious user and an artificial intelligence assistant. "
-            "The assistant gives helpful, detailed, and polite answers to the user's questions.",
-     roles=("USER", "ASSISTANT"),
-     version="v1",
-     messages=(),
-     offset=0,
-     sep_style=SeparatorStyle.TWO,
-     sep=" ",
-     sep2="</s>",
- )
-
- conv_llama_2 = Conversation(
-     system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
-
- If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
-     roles=("USER", "ASSISTANT"),
-     version="llama_v2",
-     messages=(),
-     offset=0,
-     sep_style=SeparatorStyle.LLAMA_2,
-     sep="<s>",
-     sep2="</s>",
- )
-
- conv_llava_llama_2 = Conversation(
-     system="You are a helpful language and vision assistant. "
-            "You are able to understand the visual content that the user provides, "
-            "and assist the user with a variety of tasks using natural language.",
-     roles=("USER", "ASSISTANT"),
-     version="llama_v2",
-     messages=(),
-     offset=0,
-     sep_style=SeparatorStyle.LLAMA_2,
-     sep="<s>",
-     sep2="</s>",
- )
-
- conv_mpt = Conversation(
-     system="""<|im_start|>system
- A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
-     roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
-     version="mpt",
-     messages=(),
-     offset=0,
-     sep_style=SeparatorStyle.MPT,
-     sep="<|im_end|>",
- )
-
- conv_llava_plain = Conversation(
-     system="",
-     roles=("", ""),
-     messages=(
-     ),
-     offset=0,
-     sep_style=SeparatorStyle.PLAIN,
-     sep="\n",
- )
-
- conv_llava_v0 = Conversation(
-     system="A chat between a curious human and an artificial intelligence assistant. "
-            "The assistant gives helpful, detailed, and polite answers to the human's questions.",
-     roles=("Human", "Assistant"),
-     messages=(
-     ),
-     offset=0,
-     sep_style=SeparatorStyle.SINGLE,
-     sep="###",
- )
-
- conv_llava_v0_mmtag = Conversation(
-     system="A chat between a curious user and an artificial intelligence assistant. "
-            "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
-            "The visual content will be provided with the following format: <Image>visual content</Image>.",
-     roles=("Human", "Assistant"),
-     messages=(
-     ),
-     offset=0,
-     sep_style=SeparatorStyle.SINGLE,
-     sep="###",
-     version="v0_mmtag",
- )
-
- conv_llava_v1 = Conversation(
-     system="A chat between a curious human and an artificial intelligence assistant. "
-            "The assistant gives helpful, detailed, and polite answers to the human's questions.",
-     roles=("USER", "ASSISTANT"),
-     version="v1",
-     messages=(),
-     offset=0,
-     sep_style=SeparatorStyle.TWO,
-     sep=" ",
-     sep2="</s>",
- )
-
- conv_llava_v1_mmtag = Conversation(
-     system="A chat between a curious user and an artificial intelligence assistant. "
-            "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
-            "The visual content will be provided with the following format: <Image>visual content</Image>.",
-     roles=("USER", "ASSISTANT"),
-     messages=(),
-     offset=0,
-     sep_style=SeparatorStyle.TWO,
-     sep=" ",
-     sep2="</s>",
-     version="v1_mmtag",
- )
-
- conv_mistral_instruct = Conversation(
-     system="",
-     roles=("USER", "ASSISTANT"),
-     version="llama_v2",
-     messages=(),
-     offset=0,
-     sep_style=SeparatorStyle.LLAMA_2,
-     sep="",
-     sep2="</s>",
- )
-
- conv_chatml_direct = Conversation(
-     system="""<|im_start|>system
- Answer the questions.""",
-     roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
-     version="mpt",
-     messages=(),
-     offset=0,
-     sep_style=SeparatorStyle.MPT,
-     sep="<|im_end|>",
- )
-
- default_conversation = conv_vicuna_v1
- conv_templates = {
-     "default": conv_vicuna_v0,
-     "v0": conv_vicuna_v0,
-     "v1": conv_vicuna_v1,
-     "vicuna_v1": conv_vicuna_v1,
-     "llama_2": conv_llama_2,
-     "mistral_instruct": conv_mistral_instruct,
-     "chatml_direct": conv_chatml_direct,
-     "mistral_direct": conv_chatml_direct,
-
-     "plain": conv_llava_plain,
-     "v0_plain": conv_llava_plain,
-     "llava_v0": conv_llava_v0,
-     "v0_mmtag": conv_llava_v0_mmtag,
-     "llava_v1": conv_llava_v1,
-     "v1_mmtag": conv_llava_v1_mmtag,
-     "llava_llama_2": conv_llava_llama_2,
-
-     "mpt": conv_mpt,
- }
-
-
- if __name__ == "__main__":
-     print(default_conversation.get_prompt())
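
For context, the deleted module implemented LLaVA-style conversation templating. Below is a minimal usage sketch of the API shown above, assuming a copy of the file is importable as conversation; the question text is illustrative.

from conversation import conv_templates

# Copy the template first: copy() turns the shared messages tuple into a fresh
# list, so append_message() neither fails nor mutates the module-level object.
conv = conv_templates["v1"].copy()

# conv_vicuna_v1 defines roles ("USER", "ASSISTANT").
conv.append_message(conv.roles[0], "What are the key differences between renewable and non-renewable energy sources?")
conv.append_message(conv.roles[1], None)  # None leaves the reply slot open

# With SeparatorStyle.TWO this renders "<system> USER: <question> ASSISTANT:",
# ready for the model to complete.
print(conv.get_prompt())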