Spaces:
Runtime error
Runtime error
switch to gpt-4o
Browse files
app.py
CHANGED
|
@@ -132,14 +132,15 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 132 |
print(f"Using OpenAI API key: {openai_api_key[:4]}... (truncated for security)")
|
| 133 |
|
| 134 |
#NMODEL
|
|
|
|
| 135 |
llm_client = ChatGoogleGenerativeAI(
|
| 136 |
model="gemini-2.0-flash", # or another Gemini model name
|
| 137 |
google_api_key=google_api_key, # your Gemini API key
|
| 138 |
temperature=0,
|
| 139 |
)
|
|
|
|
| 140 |
|
| 141 |
-
|
| 142 |
-
#llm_client = ChatOpenAI(model='gpt-4o',temperature=0.1,api_key=openai_api_key)
|
| 143 |
|
| 144 |
tavily_api_key = os.getenv("TAVILY_API_KEY")
|
| 145 |
if not tavily_api_key:
|
|
@@ -211,17 +212,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 211 |
# Initialize gemini model with streaming enabled
|
| 212 |
# Streaming allows tokens to be processed in real-time, reducing response latency.
|
| 213 |
#NMODEL
|
| 214 |
-
|
| 215 |
summary_llm = ChatGoogleGenerativeAI(
|
| 216 |
model="gemini-2.0-flash", # or another Gemini model name
|
| 217 |
google_api_key=google_api_key, # your Gemini API key
|
| 218 |
temperature=0,
|
| 219 |
streaming=True
|
| 220 |
)
|
|
|
|
| 221 |
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
#summary_llm = ChatOpenAI(model='gpt-4o', temperature=0, streaming=True,api_key=openai_api_key)
|
| 225 |
|
| 226 |
|
| 227 |
# Create a ReAct agent
|
|
|
|
| 132 |
print(f"Using OpenAI API key: {openai_api_key[:4]}... (truncated for security)")
|
| 133 |
|
| 134 |
#NMODEL
|
| 135 |
+
'''
|
| 136 |
llm_client = ChatGoogleGenerativeAI(
|
| 137 |
model="gemini-2.0-flash", # or another Gemini model name
|
| 138 |
google_api_key=google_api_key, # your Gemini API key
|
| 139 |
temperature=0,
|
| 140 |
)
|
| 141 |
+
'''
|
| 142 |
|
| 143 |
+
llm_client = ChatOpenAI(model='gpt-4o',temperature=0.1,api_key=openai_api_key)
|
|
|
|
| 144 |
|
| 145 |
tavily_api_key = os.getenv("TAVILY_API_KEY")
|
| 146 |
if not tavily_api_key:
|
|
|
|
| 212 |
# Initialize gemini model with streaming enabled
|
| 213 |
# Streaming allows tokens to be processed in real-time, reducing response latency.
|
| 214 |
#NMODEL
|
| 215 |
+
'''
|
| 216 |
summary_llm = ChatGoogleGenerativeAI(
|
| 217 |
model="gemini-2.0-flash", # or another Gemini model name
|
| 218 |
google_api_key=google_api_key, # your Gemini API key
|
| 219 |
temperature=0,
|
| 220 |
streaming=True
|
| 221 |
)
|
| 222 |
+
'''
|
| 223 |
|
| 224 |
+
summary_llm = ChatOpenAI(model='gpt-4o', temperature=0, streaming=True,api_key=openai_api_key)
|
|
|
|
|
|
|
| 225 |
|
| 226 |
|
| 227 |
# Create a ReAct agent
|