nikhmr1235 committed on
Commit
742b076
·
verified ·
1 Parent(s): 3c581fb

switch to gemini apis + wiki tool

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -23,7 +23,7 @@ from langchain_openai import ChatOpenAI
23
  from openai import OpenAI
24
 
25
  # tools imported from helper.py
26
- from helper import repl_tool, get_travily_api_search_tool,text_downloader_limited_tool,audio_transcriber_tool
27
 
28
 
29
 
@@ -132,15 +132,15 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
132
  print(f"Using OpenAI API key: {openai_api_key[:4]}... (truncated for security)")
133
 
134
  #NMODEL
135
- '''
136
  llm_client = ChatGoogleGenerativeAI(
137
  model="gemini-2.0-flash", # or another Gemini model name
138
  google_api_key=google_api_key, # your Gemini API key
139
  temperature=0,
140
  )
141
- '''
142
 
143
- llm_client = ChatOpenAI(model='gpt-4o',temperature=0.1,api_key=openai_api_key)
 
144
 
145
  tavily_api_key = os.getenv("TAVILY_API_KEY")
146
  if not tavily_api_key:
@@ -149,7 +149,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
149
  print(f"Using Tavily API key: {tavily_api_key[:4]}... (truncated for security)")
150
 
151
  travily_api_search_tool = get_travily_api_search_tool(tavily_api_key)
152
- tools = [travily_api_search_tool, repl_tool, text_downloader_limited_tool,audio_transcriber_tool]
153
 
154
  # Pull a predefined prompt from LangChain Hub
155
  # "hwchase17/react-chat" is a prompt template designed for ReAct-style conversational agents.
@@ -216,16 +216,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
216
  # Initialize gemini model with streaming enabled
217
  # Streaming allows tokens to be processed in real-time, reducing response latency.
218
  #NMODEL
219
- '''
220
  summary_llm = ChatGoogleGenerativeAI(
221
  model="gemini-2.0-flash", # or another Gemini model name
222
  google_api_key=google_api_key, # your Gemini API key
223
  temperature=0,
224
  streaming=True
225
  )
226
- '''
227
 
228
- summary_llm = ChatOpenAI(model='gpt-4o', temperature=0, streaming=True,api_key=openai_api_key)
 
229
 
230
 
231
  # Create a ReAct agent
 
23
  from openai import OpenAI
24
 
25
  # tools imported from helper.py
26
+ from helper import repl_tool, get_travily_api_search_tool,text_downloader_limited_tool,audio_transcriber_tool,wikipedia_search_tool
27
 
28
 
29
 
 
132
  print(f"Using OpenAI API key: {openai_api_key[:4]}... (truncated for security)")
133
 
134
  #NMODEL
135
+
136
  llm_client = ChatGoogleGenerativeAI(
137
  model="gemini-2.0-flash", # or another Gemini model name
138
  google_api_key=google_api_key, # your Gemini API key
139
  temperature=0,
140
  )
 
141
 
142
+
143
+ #llm_client = ChatOpenAI(model='gpt-4o',temperature=0.1,api_key=openai_api_key)
144
 
145
  tavily_api_key = os.getenv("TAVILY_API_KEY")
146
  if not tavily_api_key:
 
149
  print(f"Using Tavily API key: {tavily_api_key[:4]}... (truncated for security)")
150
 
151
  travily_api_search_tool = get_travily_api_search_tool(tavily_api_key)
152
+ tools = [travily_api_search_tool, repl_tool, text_downloader_limited_tool,audio_transcriber_tool,wikipedia_search_tool]
153
 
154
  # Pull a predefined prompt from LangChain Hub
155
  # "hwchase17/react-chat" is a prompt template designed for ReAct-style conversational agents.
 
216
  # Initialize gemini model with streaming enabled
217
  # Streaming allows tokens to be processed in real-time, reducing response latency.
218
  #NMODEL
219
+
220
  summary_llm = ChatGoogleGenerativeAI(
221
  model="gemini-2.0-flash", # or another Gemini model name
222
  google_api_key=google_api_key, # your Gemini API key
223
  temperature=0,
224
  streaming=True
225
  )
 
226
 
227
+
228
+ #summary_llm = ChatOpenAI(model='gpt-4o', temperature=0, streaming=True,api_key=openai_api_key)
229
 
230
 
231
  # Create a ReAct agent