Update app.py
app.py (CHANGED)

```diff
@@ -2,18 +2,20 @@ from flask import Flask, request, jsonify, render_template
 import os
 from flask_cors import CORS
 from googletrans import Translator
-from openai import OpenAI
+import litellm  # MODIFIED: Import litellm instead of OpenAI
 
-# Load API key
-
-
-
+# Load API key, base URL, and model from environment variables
+# LiteLLM can automatically read OPENAI_API_KEY, but we'll read it explicitly for clarity
+API_KEY = os.getenv("OPENAI_API_KEY") or os.getenv("GEMINI_API_KEY")
+if not API_KEY:
+    raise ValueError("API Key is missing. Set OPENAI_API_KEY or GEMINI_API_KEY in environment variables.")
 
-
-
+# Note: For litellm, the parameter is 'api_base' not 'base_url'
+API_BASE = os.getenv("OPENAI_API_BASE", "https://generativelanguage.googleapis.com/v1beta/openai/")
+MODEL_NAME = os.getenv("OPENAI_MODEL", "gemini-1.5-flash")  # Default model
 
-#
-client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
+# REMOVED: No need to instantiate a client with litellm
+# client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
 
 # Set the static folder path to the "static" folder
 STATIC_FOLDER = os.path.join(os.path.dirname(__file__), "static")
```
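The configuration above can be verified outside Flask before the app is started. A minimal smoke-test sketch reusing the same environment variables and defaults; the file name and the test prompt are illustrative, not part of this commit:

```python
# smoke_test.py (hypothetical helper): read the same settings as app.py and
# make a single litellm call, so credential problems surface before deploying.
import os

import litellm

API_KEY = os.getenv("OPENAI_API_KEY") or os.getenv("GEMINI_API_KEY")
if not API_KEY:
    raise ValueError("Set OPENAI_API_KEY or GEMINI_API_KEY first.")

API_BASE = os.getenv("OPENAI_API_BASE", "https://generativelanguage.googleapis.com/v1beta/openai/")
MODEL_NAME = os.getenv("OPENAI_MODEL", "gemini-1.5-flash")

# Same call shape as in the chat() route below: no client object is required,
# the credentials are passed with each call.
response = litellm.completion(
    model=MODEL_NAME,
    messages=[{"role": "user", "content": "Reply with the single word: pong"}],
    api_key=API_KEY,
    api_base=API_BASE,
)
print(response.choices[0].message.content)
```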
```diff
@@ -71,12 +73,17 @@ def chat():
             {"role": "user", "content": f"Context Data:\n{CONTEXT_DATA}\n\nUser Query: {user_message}"}
         ]
 
-        # Call
-
-
+        # MODIFIED: Call litellm.completion directly
+        # Pass the model, messages, api_key, and api_base here
+        response = litellm.completion(
+            model=MODEL_NAME,
             messages=messages,
-            temperature=0.7
+            temperature=0.7,
+            api_key=API_KEY,
+            api_base=API_BASE
         )
+
+        # The response structure is the same as OpenAI's, so this part doesn't change
         ai_response = response.choices[0].message.content.strip()
 
         if target_lang.lower() != "en":
```
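For comparison, the removed lines were presumably the OpenAI-client equivalent. They are only partially visible in this diff, so the "before" below is a reconstruction built around the surviving `messages=messages,` and `temperature=0.7` fragments:

```python
# BEFORE (reconstructed; the exact removed code is not fully visible):
response = client.chat.completions.create(
    model=MODEL_NAME,      # whatever model variable the old code actually used
    messages=messages,
    temperature=0.7
)

# AFTER (as added in the hunk above): litellm.completion is a plain function,
# so api_key and api_base travel with each call instead of living in a client.
response = litellm.completion(
    model=MODEL_NAME,
    messages=messages,
    temperature=0.7,
    api_key=API_KEY,
    api_base=API_BASE
)
```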
```diff
@@ -84,8 +91,9 @@ def chat():
 
         return jsonify({"response": ai_response})
     except Exception as e:
+        # LiteLLM can raise specific exceptions, but catching the general one is fine
         return jsonify({"error": str(e)}), 500
 
 
 if __name__ == '__main__':
     app.run(debug=True, host='0.0.0.0', port=7860, threaded=True)
```
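The new comment in the except block notes that LiteLLM raises provider-mapped exceptions. If finer-grained HTTP statuses are wanted later, the handler could branch on them. A sketch, with exception class names taken from LiteLLM's OpenAI-style error mapping (worth verifying against the installed version) and `_error_response` being a hypothetical helper, not part of this commit:

```python
import litellm
from flask import jsonify

def _error_response(e: Exception):
    # Hypothetical helper mirroring the except block in chat(): map LiteLLM's
    # OpenAI-style exceptions to HTTP statuses instead of a blanket 500.
    if isinstance(e, litellm.exceptions.AuthenticationError):
        return jsonify({"error": f"Authentication failed: {e}"}), 401
    if isinstance(e, litellm.exceptions.RateLimitError):
        return jsonify({"error": f"Rate limited by provider: {e}"}), 429
    return jsonify({"error": str(e)}), 500
```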
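With the server running on port 7860 (per `app.run` above), the change can be exercised end to end. A sketch using `requests`; the route path `/chat` and the JSON field names are assumptions, since the route decorator and the request parsing fall outside this diff:

```python
# Hypothetical end-to-end check; adjust the URL path and field names to match
# the actual @app.route and request JSON handling in app.py.
import requests

resp = requests.post(
    "http://localhost:7860/chat",
    json={"message": "What are your opening hours?", "target_lang": "en"},
    timeout=60,
)
print(resp.status_code, resp.json())
```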