nikhmr1235 committed on
Commit
de5159d
·
verified ·
1 Parent(s): 106a856
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -125,16 +125,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
125
  print(f"Using OpenAI API key: {openai_api_key[:4]}... (truncated for security)")
126
 
127
  #NMODEL
128
- '''
129
  llm_client = ChatGoogleGenerativeAI(
130
  model="gemini-2.0-flash", # or another Gemini model name
131
  google_api_key=google_api_key, # your Gemini API key
132
  temperature=0,
133
  )
134
- '''
135
 
136
 
137
- llm_client = ChatOpenAI(model='gpt-4o',temperature=0,api_key=openai_api_key)
138
 
139
  tavily_api_key = os.getenv("TAVILY_API_KEY")
140
  if not tavily_api_key:
@@ -219,17 +219,17 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
219
  # Initialize gemini model with streaming enabled
220
  # Streaming allows tokens to be processed in real-time, reducing response latency.
221
  #NMODEL
222
- '''
223
  summary_llm = ChatGoogleGenerativeAI(
224
  model="gemini-2.0-flash", # or another Gemini model name
225
  google_api_key=google_api_key, # your Gemini API key
226
  temperature=0,
227
  streaming=True
228
  )
229
- '''
230
 
231
 
232
- summary_llm = ChatOpenAI(model='gpt-4o', temperature=0, streaming=True,api_key=openai_api_key)
233
 
234
  # Create a ReAct agent
235
  # The agent will reason and take actions based on retrieved tools and memory.
 
125
  print(f"Using OpenAI API key: {openai_api_key[:4]}... (truncated for security)")
126
 
127
  #NMODEL
128
+ #'''
129
  llm_client = ChatGoogleGenerativeAI(
130
  model="gemini-2.0-flash", # or another Gemini model name
131
  google_api_key=google_api_key, # your Gemini API key
132
  temperature=0,
133
  )
134
+ #'''
135
 
136
 
137
+ #llm_client = ChatOpenAI(model='gpt-4o',temperature=0,api_key=openai_api_key)
138
 
139
  tavily_api_key = os.getenv("TAVILY_API_KEY")
140
  if not tavily_api_key:
 
219
  # Initialize gemini model with streaming enabled
220
  # Streaming allows tokens to be processed in real-time, reducing response latency.
221
  #NMODEL
222
+ #'''
223
  summary_llm = ChatGoogleGenerativeAI(
224
  model="gemini-2.0-flash", # or another Gemini model name
225
  google_api_key=google_api_key, # your Gemini API key
226
  temperature=0,
227
  streaming=True
228
  )
229
+ #'''
230
 
231
 
232
+ #summary_llm = ChatOpenAI(model='gpt-4o', temperature=0, streaming=True,api_key=openai_api_key)
233
 
234
  # Create a ReAct agent
235
  # The agent will reason and take actions based on retrieved tools and memory.