lruizap committed on
Commit
c11aa2e
ยท
1 Parent(s): 7ed48bc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -16
app.py CHANGED
@@ -14,30 +14,25 @@ pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha",
14
  torch_dtype=torch.bfloat16, device_map="auto")
15
 
16
 
17
- def useZephyr(prompt, promptdos):
 
 
 
 
 
18
  messages = [
19
  {
20
  "role": "system",
21
  "content": "you are a chatbot who always responds politely and in the shortest possible way",
22
  },
23
- {"role": "user", "content": prompt},
24
  {"role": "user", "content": promptdos},
25
  ]
26
  # https://huggingface.co/docs/transformers/main/en/chat_templating
27
- new_prompt = pipe.tokenizer.apply_chat_template(
28
- messages, tokenize=False, add_generation_prompt=True)
29
-
30
- outputs = pipe(new_prompt)
31
- return outputs[0]["generated_text"]
32
-
33
-
34
- def generatePrompt(prompt, promptdos):
35
- batch = tokenizer(prompt, return_tensors="pt")
36
- generated_ids = model.generate(batch["input_ids"])
37
- output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
38
- prompt = output[0]
39
 
40
- result = useZephyr(prompt, promptdos)
 
41
 
42
  return result
43
  #
@@ -52,6 +47,6 @@ examples = [["photographer"], ["developer"], ["teacher"], [
52
  "human resources staff"], ["recipe for ham croquettes"]]
53
  description = ""
54
 
55
- PerfectGPT = gr.Interface(generatePrompt, inputs=[input_prompt, input_promptdos], outputs=output_component, examples=examples, title="๐Ÿ—ฟ PerfectGPT v1 ๐Ÿ—ฟ", description=description)
56
 
57
  PerfectGPT.launch()
 
14
  torch_dtype=torch.bfloat16, device_map="auto")
15
 
16
 
17
def generatePrompt(prompt, promptdos):
    """Expand *prompt* with the base model, then answer via the Zephyr chat pipeline.

    Steps:
      1. Run *prompt* through `model`/`tokenizer` to produce an expanded prompt.
      2. Build a chat transcript (system rule + expanded prompt + *promptdos*).
      3. Render it with the pipeline tokenizer's chat template and generate.

    NOTE(review): `tokenizer`, `model`, and `pipe` are module-level objects
    defined elsewhere in this file; `generated_text` from the pipeline
    includes the rendered prompt — presumably acceptable here, verify.
    """
    # Stage 1: let the base model rewrite/expand the raw user prompt.
    encoded = tokenizer(prompt, return_tensors="pt")
    ids = model.generate(encoded["input_ids"])
    decoded = tokenizer.batch_decode(ids, skip_special_tokens=True)
    expanded = decoded[0]

    # Stage 2: assemble the chat messages for the Zephyr pipeline.
    system_rule = {
        "role": "system",
        "content": "you are a chatbot who always responds politely and in the shortest possible way",
    }
    conversation = [
        system_rule,
        {"role": "user", "content": expanded},
        {"role": "user", "content": promptdos},
    ]

    # Stage 3: render with the model's chat template and generate.
    # https://huggingface.co/docs/transformers/main/en/chat_templating
    rendered = pipe.tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=True
    )
    generations = pipe(rendered)
    return generations[0]["generated_text"]
38
  #
 
47
  "human resources staff"], ["recipe for ham croquettes"]]
48
  description = ""
49
 
50
+ PerfectGPT = gr.Interface(generatePrompt, inputs=(input_prompt, input_promptdos), outputs=output_component, examples=examples, title="๐Ÿ—ฟ PerfectGPT v1 ๐Ÿ—ฟ", description=description)
51
 
52
  PerfectGPT.launch()