lruizap committed on
Commit
07f635e
·
1 Parent(s): 0ce09fc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -25
app.py CHANGED
@@ -30,6 +30,11 @@ def generate(inputuno, inputdos, max_new_tokens=3556, top_p=0.95, repetition_pen
30
  seed=42,
31
  )
32
 
 
 
 
 
 
33
  messages = [
34
  {
35
  "role": "system", "content": str(new_prompt)
@@ -47,31 +52,6 @@ def generate(inputuno, inputdos, max_new_tokens=3556, top_p=0.95, repetition_pen
47
  yield output
48
  return output
49
 
50
-
51
- def generatePrompt(inputuno, inputdos):
52
-
53
- prompt = inputuno
54
- promptdos = inputdos
55
-
56
- batch = tokenizer(prompt, return_tensors="pt")
57
- generated_ids = model.generate(batch["input_ids"])
58
- output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
59
- new_prompt = output[0]
60
-
61
- messages = [
62
- {
63
- "role": "system", "content": str(new_prompt)
64
- },
65
- {
66
- "role": "user", "content": str(promptdos)
67
- },
68
- ]
69
- # https://huggingface.co/docs/transformers/main/en/chat_templating
70
- final_prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
71
-
72
- outputs = pipe(final_prompt, do_sample=True,)
73
-
74
- return outputs[0]["generated_text"]
75
  #
76
 
77
  # Interface
 
30
  seed=42,
31
  )
32
 
33
+ batch = tokenizer(prompt, return_tensors="pt")
34
+ generated_ids = model.generate(batch["input_ids"])
35
+ output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
36
+ new_prompt = output[0]
37
+
38
  messages = [
39
  {
40
  "role": "system", "content": str(new_prompt)
 
52
  yield output
53
  return output
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  #
56
 
57
  # Interface