Update app.py
Browse files
app.py
CHANGED
|
@@ -226,42 +226,6 @@ def umwandeln_fuer_anzeige(image):
|
|
| 226 |
|
| 227 |
|
| 228 |
|
| 229 |
-
##########################################
|
| 230 |
-
#ein hochgeladenes Bild so vorbereiten, dass die OpenAI-API es annehmen und verarbeiten kann
|
| 231 |
-
#muss ein base64-Bild sein; Header und Payload entsprechend konfigurieren
|
| 232 |
-
def process_image(image_path, prompt, model=None):
    """Build HTTP headers and a chat-completions payload for an image request.

    The image at *image_path* is read in binary mode, base64-encoded, and
    embedded as a ``data:`` URL next to the text prompt, following the
    OpenAI vision message format.

    Args:
        image_path: Path to the image file to send.
        prompt: User prompt text; appended to the module-level
            ``llm_template`` prefix.
        model: Optional model name for the ``"model"`` field of the payload.
            Defaults to the module-level ``MODEL_NAME_IMAGE`` when omitted,
            so existing two-argument callers keep working while callers that
            pass a model explicitly (e.g. ``process_image(file, prompt,
            MODEL_NAME_IMAGE)``) are now supported as well.

    Returns:
        A ``(headers, payload)`` tuple ready to be posted to the OpenAI
        chat-completions endpoint with ``requests.post(..., json=payload)``.
    """
    # Convert the image to base64 so it can be inlined as a data URL.
    with open(image_path, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode('utf-8')

    # Resolve the default lazily: the module-level constant is only needed
    # when the caller does not supply a model explicitly.
    if model is None:
        model = MODEL_NAME_IMAGE

    # Prepare the data for the API request (specific to the API being used).
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OAI_API_KEY}"
    }
    payload = {
        "model": model,
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": llm_template + prompt
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            # NOTE(review): assumes JPEG input — confirm the
                            # upload widget restricts file types accordingly.
                            "url": f"data:image/jpeg;base64,{encoded_string}"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }
    return headers, payload
|
| 264 |
-
|
| 265 |
##################################################
|
| 266 |
#OpenAI-Assistant, um hochgeladene Dateien zu analysieren
|
| 267 |
def create_assistant_file(prompt, file):
|
|
@@ -399,7 +363,7 @@ def generate_text_zu_bild(file, prompt, k, rag_option, chatbot, history, db):
|
|
| 399 |
#als reiner prompt:
|
| 400 |
prompt_neu = generate_prompt_with_history(neu_text_mit_chunks, history)
|
| 401 |
|
| 402 |
-
headers, payload = process_image(file, prompt_neu)
|
| 403 |
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
|
| 404 |
#als json ausgeben
|
| 405 |
data = response.json()
|
|
|
|
| 226 |
|
| 227 |
|
| 228 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 229 |
##################################################
|
| 230 |
#openassistant um uploaded Files zu analysieren
|
| 231 |
def create_assistant_file(prompt, file):
|
|
|
|
| 363 |
#als reiner prompt:
|
| 364 |
prompt_neu = generate_prompt_with_history(neu_text_mit_chunks, history)
|
| 365 |
|
| 366 |
+
headers, payload = process_image(file, prompt_neu, MODEL_NAME_IMAGE)
|
| 367 |
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
|
| 368 |
#als json ausgeben
|
| 369 |
data = response.json()
|