Update utils.py
Browse files
utils.py
CHANGED
|
@@ -316,9 +316,69 @@ def generate_prompt_with_history_langchain(prompt, history):
|
|
| 316 |
|
| 317 |
return history_langchain_format
|
| 318 |
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 322 |
|
| 323 |
|
| 324 |
########################################################
|
|
|
|
| 316 |
|
| 317 |
return history_langchain_format
|
| 318 |
|
| 319 |
+
##########################################
|
| 320 |
+
# Assemble the JSON request (headers + payload) for OpenAI chat generation
|
| 321 |
+
##########################################
|
| 322 |
+
##########################################
|
| 323 |
+
# Prepare an uploaded image so the OpenAI API can accept and process it
|
| 324 |
+
# The image must be base64-encoded; configure headers and payload accordingly
|
| 325 |
+
def process_image(image_path, prompt, model_image):
    """Build headers and JSON payload for an OpenAI vision chat request.

    Reads the image at *image_path*, base64-encodes it, and embeds it as a
    data URL next to the text prompt (prefixed with the module-level
    ``llm_template``) in a chat-completions message.

    Parameters
    ----------
    image_path : str
        Path to the uploaded image file.
    prompt : str
        User prompt; appended to the module-level ``llm_template`` prefix.
    model_image : str
        Name of the vision-capable model to request.

    Returns
    -------
    tuple[dict, dict]
        ``(headers, payload)`` ready to be POSTed to the API.
    """
    import mimetypes  # local import: only needed here, keeps file-level imports untouched

    # Fix: declare the image's actual MIME type instead of always claiming
    # JPEG (the original mislabeled PNG/GIF/WebP uploads). Falls back to the
    # original behavior (image/jpeg) when the type cannot be determined.
    mime_type, _ = mimetypes.guess_type(image_path)
    if mime_type is None or not mime_type.startswith("image/"):
        mime_type = "image/jpeg"

    # Convert the image to base64 so it can be inlined as a data URL.
    with open(image_path, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode('utf-8')

    # Prepare the data for the API request (OpenAI chat-completions format).
    headers = {
        "Content-Type": "application/json",
        # OAI_API_KEY is a module-level constant defined elsewhere in utils.py
        "Authorization": f"Bearer {OAI_API_KEY}"
    }
    payload = {
        "model": model_image,
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        # llm_template is a module-level prompt prefix
                        "text": llm_template + prompt
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:{mime_type};base64,{encoded_string}"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }
    return headers, payload
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def process_chatverlauf(prompt, model):
    """Build headers and JSON payload asking the model to title a chat.

    Creates an OpenAI chat-completions request whose (German) instruction
    asks for a heading of at most three words for the given chat text.

    Parameters
    ----------
    prompt : str
        The chat transcript (or text) to be summarized into a heading.
    model : str
        Name of the model to request.

    Returns
    -------
    tuple[dict, dict]
        ``(headers, payload)`` ready to be POSTed to the API.
    """
    # Prepare the data for the API request (OpenAI chat-completions format).
    headers = {
        "Content-Type": "application/json",
        # OAI_API_KEY is a module-level constant defined elsewhere in utils.py
        "Authorization": f"Bearer {OAI_API_KEY}"
    }
    # Fix: the original payload literal had a stray extra '}' after the text
    # content element, which made the whole function a SyntaxError.
    payload = {
        "model": model,
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": 'Gib folgendem Text eine Überschrift mit maximal 3 Worten' + prompt
                    }
                ]
            }
        ],
        "max_tokens": 100
    }
    return headers, payload
|
| 382 |
|
| 383 |
|
| 384 |
########################################################
|