LPX55 committed on
Commit
8e51466
·
verified ·
1 Parent(s): 20d1485

Update raw.py

Browse files
Files changed (1) hide show
  1. raw.py +1 -0
raw.py CHANGED
@@ -105,6 +105,7 @@ def caption(input_image: Image.Image, prompt: str, temperature: float, top_p: fl
105
  # WARNING: HF's handling of chat's on Llava models is very fragile. This specific combination of processor.apply_chat_template(), and processor() works
106
  # but if using other combinations always inspect the final input_ids to ensure they are correct. Often times you will end up with multiple <bos> tokens
107
  # if not careful, which can make the model perform poorly.
 
108
  convo_string = cap_processor.apply_chat_template(convo, tokenize=False, add_generation_prompt=True)
109
  assert isinstance(convo_string, str)
110
  inputs = cap_processor(text=[convo_string], images=[input_image], return_tensors="pt").to('cuda')
 
105
  # WARNING: HF's handling of chat's on Llava models is very fragile. This specific combination of processor.apply_chat_template(), and processor() works
106
  # but if using other combinations always inspect the final input_ids to ensure they are correct. Often times you will end up with multiple <bos> tokens
107
  # if not careful, which can make the model perform poorly.
108
+
109
  convo_string = cap_processor.apply_chat_template(convo, tokenize=False, add_generation_prompt=True)
110
  assert isinstance(convo_string, str)
111
  inputs = cap_processor(text=[convo_string], images=[input_image], return_tensors="pt").to('cuda')