ThomasSimonini committed on
Commit
441499d
·
verified ·
1 Parent(s): 3523a7a

Upload moondream.py

Browse files
Files changed (1) hide show
  1. moondream.py +20 -1
moondream.py CHANGED
@@ -87,6 +87,7 @@ class Moondream(PreTrainedModel):
87
 
88
  return tokenizer.batch_decode(output_ids, skip_special_tokens=True)
89
 
 
90
  def answer_question(
91
  self,
92
  image_embeds,
@@ -105,14 +106,32 @@ class Moondream(PreTrainedModel):
105
  **kwargs,
106
  )[0]
107
  cleaned_answer = answer.strip()
108
- print("DONE")
109
 
110
  # Use the result_queue to pass the result if it is provided
111
  if result_queue:
112
  result_queue.put(cleaned_answer)
113
  else:
114
  return cleaned_answer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
 
 
 
116
  def batch_answer(
117
  self,
118
  images,
 
87
 
88
  return tokenizer.batch_decode(output_ids, skip_special_tokens=True)
89
 
90
+ """
91
  def answer_question(
92
  self,
93
  image_embeds,
 
106
  **kwargs,
107
  )[0]
108
  cleaned_answer = answer.strip()
 
109
 
110
  # Use the result_queue to pass the result if it is provided
111
  if result_queue:
112
  result_queue.put(cleaned_answer)
113
  else:
114
  return cleaned_answer
115
+ """
116
def answer_question(
    self,
    image_embeds,
    question,
    tokenizer,
    chat_history="",
    result_queue=None,
    **kwargs,
):
    """Generate and stream an answer to ``question`` about an encoded image.

    Builds a chat-style prompt from ``chat_history`` and ``question``,
    runs the text model with a ``TextStreamer`` attached (tokens are
    printed as they are produced), and then yields the fully decoded
    text of each generated sequence.

    Args:
        image_embeds: Precomputed image embeddings for the prompt.
        question: The user's question about the image.
        tokenizer: Tokenizer used for prompt embedding and decoding.
        chat_history: Optional prior conversation text prepended to the prompt.
        result_queue: Accepted for backward compatibility with the previous
            (non-streaming) implementation; not used by this version.
        **kwargs: Extra generation arguments forwarded to ``generate``.

    Yields:
        str: The decoded answer text for each generated sequence.
    """
    full_prompt = f"<image>\n\n{chat_history}Question: {question}\n\nAnswer:"
    token_streamer = TextStreamer(tokenizer)
    generated = self.text_model.generate(
        inputs_embeds=self.input_embeds(full_prompt, image_embeds, tokenizer),
        streamer=token_streamer,
        **kwargs,
    )
    # One decoded string per generated sequence, special tokens stripped.
    yield from (
        tokenizer.decode(sequence, skip_special_tokens=True)
        for sequence in generated
    )
135
  def batch_answer(
136
  self,
137
  images,