ranamhamoud committed on
Commit
0539d0d
·
verified ·
1 Parent(s): 97a0217

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -5
app.py CHANGED
@@ -70,7 +70,6 @@ def process_text(text):
70
  return text
71
 
72
 
73
- # Gradio Function
74
  @spaces.GPU
75
  def generate(
76
  model_choice: str,
@@ -89,9 +88,17 @@ def generate(
89
  else:
90
  model = editing_model
91
  tokenizer = editing_tokenizer
92
-
93
- for user, assistant in chat_history:
94
- conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
 
 
 
 
 
 
 
 
95
  conversation.append({"role": "user", "content": message})
96
 
97
  input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
@@ -102,7 +109,7 @@ def generate(
102
 
103
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
104
  generate_kwargs = dict(
105
- {"input_ids": input_ids},
106
  streamer=streamer,
107
  max_new_tokens=max_new_tokens,
108
  do_sample=True,
 
70
  return text
71
 
72
 
 
73
  @spaces.GPU
74
  def generate(
75
  model_choice: str,
 
88
  else:
89
  model = editing_model
90
  tokenizer = editing_tokenizer
91
+
92
+ # Checking each tuple in chat_history to ensure it has exactly two elements
93
+ for item in chat_history:
94
+ if isinstance(item, tuple) and len(item) == 2:
95
+ user, assistant = item
96
+ conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
97
+ else:
98
+ print(f"Error in chat history item: {item}. Each item must be a tuple with exactly two elements.")
99
+ continue # Skip this item or handle appropriately
100
+
101
+ # Append the current user message
102
  conversation.append({"role": "user", "content": message})
103
 
104
  input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
 
109
 
110
  streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
111
  generate_kwargs = dict(
112
+ input_ids=input_ids,
113
  streamer=streamer,
114
  max_new_tokens=max_new_tokens,
115
  do_sample=True,