ThomasSimonini committed on
Commit
2a3e8ee
·
verified ·
1 Parent(s): c22676d

Upload moondream.py

Browse files
Files changed (1) hide show
  1. moondream.py +5 -2
moondream.py CHANGED
@@ -1,7 +1,7 @@
1
  import torch
2
  from .vision_encoder import VisionEncoder
3
  from .configuration_moondream import MoondreamConfig
4
- from transformers import PreTrainedModel, TextStreamer
5
 
6
  from .modeling_phi import PhiForCausalLM
7
  from .configuration_moondream import PhiConfig
@@ -62,6 +62,7 @@ class Moondream(PreTrainedModel):
62
  def get_input_embeddings(self):
63
  return self.text_model.get_input_embeddings()
64
 
 
65
  def generate(
66
  self,
67
  image_embeds,
@@ -80,13 +81,15 @@ class Moondream(PreTrainedModel):
80
 
81
  with torch.no_grad():
82
  inputs_embeds = self.input_embeds(prompt, image_embeds, tokenizer)
83
- streamer = TextStreamer(tokenizer)
84
  output_ids = self.text_model.generate(
85
  inputs_embeds=inputs_embeds, streamer=streamer, **generate_config
86
  )
87
 
88
  return tokenizer.batch_decode(output_ids, skip_special_tokens=True)
89
 
 
 
90
  def answer_question(
91
  self,
92
  image_embeds,
 
1
  import torch
2
  from .vision_encoder import VisionEncoder
3
  from .configuration_moondream import MoondreamConfig
4
+ from transformers import PreTrainedModel, TextIteratorStreamer
5
 
6
  from .modeling_phi import PhiForCausalLM
7
  from .configuration_moondream import PhiConfig
 
62
  def get_input_embeddings(self):
63
  return self.text_model.get_input_embeddings()
64
 
65
+
66
  def generate(
67
  self,
68
  image_embeds,
 
81
 
82
  with torch.no_grad():
83
  inputs_embeds = self.input_embeds(prompt, image_embeds, tokenizer)
84
+ streamer = TextIteratorStreamer(tokenizer)
85
  output_ids = self.text_model.generate(
86
  inputs_embeds=inputs_embeds, streamer=streamer, **generate_config
87
  )
88
 
89
  return tokenizer.batch_decode(output_ids, skip_special_tokens=True)
90
 
91
+
92
+
93
  def answer_question(
94
  self,
95
  image_embeds,