nikhmr1235 committed on
Commit
97b0c6c
·
verified ·
1 Parent(s): b2799ca

modify prompt to use webdownloadertool only to fetch attachments

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -132,15 +132,14 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
132
  print(f"Using OpenAI API key: {openai_api_key[:4]}... (truncated for security)")
133
 
134
  #NMODEL
135
- '''
136
  llm_client = ChatGoogleGenerativeAI(
137
  model="gemini-2.0-flash", # or another Gemini model name
138
  google_api_key=google_api_key, # your Gemini API key
139
  temperature=0,
140
  )
141
- '''
142
 
143
- llm_client = ChatOpenAI(model='gpt-4o',temperature=0.1,api_key=openai_api_key)
 
144
 
145
  tavily_api_key = os.getenv("TAVILY_API_KEY")
146
  if not tavily_api_key:
@@ -170,6 +169,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
170
  Action Input: [input_for_the_tool]
171
  Observation: [result_from_the_tool]
172
 
 
 
173
  If you have sufficient information and can provide a concise response, or if no tool is needed, you MUST use this precise format:
174
 
175
  Thought: I have enough information, or no tool is needed.
@@ -211,17 +212,17 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
211
  # Initialize gemini model with streaming enabled
212
  # Streaming allows tokens to be processed in real-time, reducing response latency.
213
  #NMODEL
214
- '''
215
  summary_llm = ChatGoogleGenerativeAI(
216
  model="gemini-2.0-flash", # or another Gemini model name
217
  google_api_key=google_api_key, # your Gemini API key
218
  temperature=0,
219
  streaming=True
220
  )
221
- '''
222
 
223
 
224
- summary_llm = ChatOpenAI(model='gpt-4o', temperature=0, streaming=True,api_key=openai_api_key)
225
 
226
 
227
  # Create a ReAct agent
 
132
  print(f"Using OpenAI API key: {openai_api_key[:4]}... (truncated for security)")
133
 
134
  #NMODEL
 
135
  llm_client = ChatGoogleGenerativeAI(
136
  model="gemini-2.0-flash", # or another Gemini model name
137
  google_api_key=google_api_key, # your Gemini API key
138
  temperature=0,
139
  )
 
140
 
141
+
142
+ #llm_client = ChatOpenAI(model='gpt-4o',temperature=0.1,api_key=openai_api_key)
143
 
144
  tavily_api_key = os.getenv("TAVILY_API_KEY")
145
  if not tavily_api_key:
 
169
  Action Input: [input_for_the_tool]
170
  Observation: [result_from_the_tool]
171
 
172
+ NOTE: use web_downloader_limited tool ONLY if the input has text: "Attachment '{file_name}' available at: {attachment_url}", otherwise use the tavily_search tool.
173
+
174
  If you have sufficient information and can provide a concise response, or if no tool is needed, you MUST use this precise format:
175
 
176
  Thought: I have enough information, or no tool is needed.
 
212
  # Initialize gemini model with streaming enabled
213
  # Streaming allows tokens to be processed in real-time, reducing response latency.
214
  #NMODEL
215
+
216
  summary_llm = ChatGoogleGenerativeAI(
217
  model="gemini-2.0-flash", # or another Gemini model name
218
  google_api_key=google_api_key, # your Gemini API key
219
  temperature=0,
220
  streaming=True
221
  )
222
+
223
 
224
 
225
+ #summary_llm = ChatOpenAI(model='gpt-4o', temperature=0, streaming=True,api_key=openai_api_key)
226
 
227
 
228
  # Create a ReAct agent