KevinHuSh
committed on
Commit
·
f5f274f
1
Parent(s):
886bc17
fix multi-modal bug (#1127)
Browse files

### What problem does this PR solve?
### Type of change
- [x] Bug Fix (non-breaking change which fixes an issue)
- rag/llm/cv_model.py +5 -1
rag/llm/cv_model.py
CHANGED
|
def describe(self, image, max_tokens=300):
    """Ask the vision model for a textual description of *image*.

    Args:
        image: Image data accepted by ``self.image2base64`` (bytes or a
            PIL-style image — exact contract defined by the base class,
            not visible here).
        max_tokens: Upper bound on the completion length.

    Returns:
        Tuple of (description text with surrounding whitespace stripped,
        total tokens consumed as reported by the API).
    """
    b64 = self.image2base64(image)
    prompt = self.prompt(b64)
    # Bug fix (#1127): ``self.prompt`` builds multi-modal content parts
    # that carry a "type" key; this endpoint rejects it, so strip the key
    # before sending. Guards allow plain-string content untouched.
    for message in prompt:
        content = message.get("content")
        if isinstance(content, list):
            for part in content:
                if isinstance(part, dict):
                    part.pop("type", None)
    res = self.client.chat.completions.create(
        model=self.model_name,
        messages=prompt,
        max_tokens=max_tokens,
    )
    return res.choices[0].message.content.strip(), res.usage.total_tokens
|
def describe(self, image, max_tokens=300):
    """Ask the vision model for a textual description of *image*.

    Args:
        image: Image data accepted by ``self.image2base64`` (bytes or a
            PIL-style image — exact contract defined by the base class,
            not visible here).
        max_tokens: Upper bound on the completion length.

    Returns:
        Tuple of (description text with surrounding whitespace stripped,
        total tokens consumed as reported by the API).
    """
    b64 = self.image2base64(image)
    prompt = self.prompt(b64)
    # This endpoint rejects the "type" key inside multi-modal content
    # parts (#1127), so drop it. Iterate messages directly instead of
    # ``range(len(...))``; ``pop`` with a default replaces the fragile
    # ``if "type" in c: del c["type"]``. The isinstance guards skip
    # plain-string content, which the original would have iterated
    # character by character.
    for message in prompt:
        content = message.get("content")
        if isinstance(content, list):
            for part in content:
                if isinstance(part, dict):
                    part.pop("type", None)
    res = self.client.chat.completions.create(
        model=self.model_name,
        messages=prompt,
        max_tokens=max_tokens,
    )
    return res.choices[0].message.content.strip(), res.usage.total_tokens