ranamhamoud committed on
Commit
46868f3
·
verified ·
1 Parent(s): 273429b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -89,16 +89,16 @@ def generate(
89
  else:
90
  model = editing_model
91
  tokenizer = editing_tokenizer
 
92
  for user, assistant in chat_history:
93
  conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
94
- conversation.append({"role": "user", "content": make_prompt(message)})
95
- # enc = tokenizer(make_prompt(message), return_tensors="pt", padding=True, truncation=True)
96
- # input_ids = enc.input_ids.to(model.device)
97
- input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
98
 
 
99
  if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
100
  input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
101
  gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
 
102
 
103
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=False)
104
  generate_kwargs = dict(
 
89
  else:
90
  model = editing_model
91
  tokenizer = editing_tokenizer
92
+
93
  for user, assistant in chat_history:
94
  conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
95
+ conversation.append({"role": "user", "content": message})
 
 
 
96
 
97
+ input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
98
  if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
99
  input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
100
  gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
101
+ input_ids = input_ids.to(model.device)
102
 
103
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=False)
104
  generate_kwargs = dict(