Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
|
@@ -348,6 +348,38 @@ def chat_tmp():
|
|
| 348 |
response_data = {'creator': 'api.Kastg.com', 'status': 'success', 'chat-id': chat_id, 'messages': TOKEN_MESSAGES_TMP[chat_id]}
|
| 349 |
return jsonify(response_data)
|
| 350 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 351 |
|
| 352 |
if __name__ == "__main__":
    # Launch the Flask development server, reachable on every interface
    # (required inside the hosting container) on port 7860.
    app.run(debug=True, host="0.0.0.0", port=7860)
|
|
|
|
| 348 |
response_data = {'creator': 'api.Kastg.com', 'status': 'success', 'chat-id': chat_id, 'messages': TOKEN_MESSAGES_TMP[chat_id]}
|
| 349 |
return jsonify(response_data)
|
| 350 |
|
| 351 |
@app.route('/messages', methods=['POST'])
def handle_message():
    """Proxy a chat-completion request to the G4F client.

    Expects a JSON body of the form::

        {"messages": [{"role": "...", "content": "..."}, ...],
         "model": "gpt-4o-mini"}   # model is optional

    Returns ``{"response": <assistant text>}`` on success, or an
    ``{"error": ...}`` payload with status 400 (bad input) or 500
    (upstream/completion failure).
    """
    # Parse the body up front; silent=True yields None instead of raising
    # on a missing or malformed JSON body, so bad input gets a clean 400
    # rather than falling into the generic 500 handler below.
    data = request.get_json(silent=True)
    if data is None:
        return jsonify({"error": "No messages provided"}), 400

    messages = data.get('messages', [])
    model = data.get('model', 'gpt-4o-mini')

    if not messages:
        return jsonify({"error": "No messages provided"}), 400

    # Validate the structure of messages: must be a list of dicts, each
    # carrying both a role and a content field.
    if not isinstance(messages, list):
        return jsonify({"error": "Invalid message format"}), 400
    for message in messages:
        if (not isinstance(message, dict)
                or 'role' not in message
                or 'content' not in message):
            return jsonify({"error": "Invalid message format"}), 400

    try:
        # Only the upstream call lives inside try: validation errors above
        # are reported as 400, genuine completion failures as 500.
        response = client.chat.completions.create(
            model=model,
            messages=messages
        )

        # Extract the assistant's reply from the first choice.
        ai_response = response.choices[0].message.content
        return jsonify({"response": ai_response})
    except Exception as e:
        # NOTE(review): str(e) may leak backend internals to clients —
        # consider logging server-side and returning a generic message.
        return jsonify({"error": str(e)}), 500
|
| 384 |
if __name__ == "__main__":
    # Start the Flask dev server; bind all interfaces so the app is
    # reachable from outside the container, on port 7860.
    app.run(debug=True, host="0.0.0.0", port=7860)
|