Spaces:
Runtime error
Runtime error
gpt
Browse files
app.py
CHANGED
|
@@ -125,16 +125,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 125 |
print(f"Using OpenAI API key: {openai_api_key[:4]}... (truncated for security)")
|
| 126 |
|
| 127 |
#NMODEL
|
| 128 |
-
|
| 129 |
llm_client = ChatGoogleGenerativeAI(
|
| 130 |
model="gemini-2.0-flash", # or another Gemini model name
|
| 131 |
google_api_key=google_api_key, # your Gemini API key
|
| 132 |
temperature=0,
|
| 133 |
)
|
| 134 |
-
|
| 135 |
|
| 136 |
|
| 137 |
-
|
| 138 |
|
| 139 |
tavily_api_key = os.getenv("TAVILY_API_KEY")
|
| 140 |
if not tavily_api_key:
|
|
@@ -143,7 +143,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 143 |
print(f"Using Tavily API key: {tavily_api_key[:4]}... (truncated for security)")
|
| 144 |
|
| 145 |
travily_api_search_tool = get_travily_api_search_tool(tavily_api_key)
|
| 146 |
-
tools = [travily_api_search_tool, repl_tool, file_saver_tool,audio_transcriber_tool,wikipedia_search_tool,wikipedia_full_content_tool]
|
|
|
|
| 147 |
|
| 148 |
# Pull a predefined prompt from LangChain Hub
|
| 149 |
# "hwchase17/react-chat" is a prompt template designed for ReAct-style conversational agents.
|
|
@@ -168,25 +169,12 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 168 |
IMPORTANT NOTE ON TOOL USAGE:
|
| 169 |
- If an 'Observation' from a tool does NOT directly contain the specific answer to your question, you MUST refine your query or switch to a different, more suitable tool (e.g., 'tavily_search' for broader or more current information if 'wikipedia_search_tool' was insufficient). Do NOT get stuck repeatedly using the same tool if it's not yielding the direct answer.
|
| 170 |
- If the input contains the exact phrase "Attachment '{{file_name}}' available at: {{attachment_url}}" (where '{{file_name}}' and '{{attachment_url}}' are placeholders for actual values), consider the file type:
|
| 171 |
-
- If the file type is binary/text (e.g., .xlsx, .docx, .mp3, .jpg, .pdf), you MUST use the 'file_saver' tool to download and save it.
|
| 172 |
For 'file_saver', the Action Input must be a JSON string like: '{{"url": "the_attachment_url", "local_filename": "the_file_name_from_attachment"}}'
|
| 173 |
Example: If the attachment is 'Homework.mp3' at 'https://agents-course-unit4-scoring.hf.space/files/121898981', Action Input for file_saver would be '{{"url": "https://agents-course-unit4-scoring.hf.space/files/121898981", "local_filename": "Homework.mp3"}}'
|
| 174 |
|
| 175 |
IMPORTANT: When processing audio files (like .mp3) that have been saved using 'file_saver', the 'audio_transcriber_tool' MUST be used with the 'local_filename' of the saved audio file as its Action Input. Do NOT pass URLs or remote paths directly to 'audio_transcriber_tool'.
|
| 176 |
|
| 177 |
-
If you need to count or extract items from a Wikipedia section (like a list of albums with years), use the 'wikipedia_full_content_tool' to get the section text, then use the 'python_repl' tool to parse the text and count the relevant items.
|
| 178 |
-
Example:
|
| 179 |
-
Thought: I need to count Mercedes Sosa's studio albums from 2000 to 2009.
|
| 180 |
-
Action: wikipedia_full_content_tool
|
| 181 |
-
Action Input: "Mercedes Sosa section: Discography"
|
| 182 |
-
Observation: [Discography text]
|
| 183 |
-
Thought: I need to parse this text and count albums released between 2000 and 2009.
|
| 184 |
-
Action: python_repl
|
| 185 |
-
Action Input: [Python code that parses the text and counts albums by year]
|
| 186 |
-
Observation: [Result]
|
| 187 |
-
Thought: I have found the answer.
|
| 188 |
-
Final Answer: [number]
|
| 189 |
-
|
| 190 |
If you have sufficient information and can provide a CONCISE response, or if no tool is needed, you MUST use this precise format:
|
| 191 |
If you can use an LLM to answer the question, think step-by-step and then answer the question.
|
| 192 |
Example: given a chess board image and asked to predict the next best move, if Multi-modal LLM is available, you can use it to answer the question.
|
|
@@ -231,17 +219,17 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 231 |
# Initialize gemini model with streaming enabled
|
| 232 |
# Streaming allows tokens to be processed in real-time, reducing response latency.
|
| 233 |
#NMODEL
|
| 234 |
-
|
| 235 |
summary_llm = ChatGoogleGenerativeAI(
|
| 236 |
model="gemini-2.0-flash", # or another Gemini model name
|
| 237 |
google_api_key=google_api_key, # your Gemini API key
|
| 238 |
temperature=0,
|
| 239 |
streaming=True
|
| 240 |
)
|
| 241 |
-
|
| 242 |
|
| 243 |
|
| 244 |
-
|
| 245 |
|
| 246 |
# Create a ReAct agent
|
| 247 |
# The agent will reason and take actions based on retrieved tools and memory.
|
|
|
|
| 125 |
print(f"Using OpenAI API key: {openai_api_key[:4]}... (truncated for security)")
|
| 126 |
|
| 127 |
#NMODEL
|
| 128 |
+
'''
|
| 129 |
llm_client = ChatGoogleGenerativeAI(
|
| 130 |
model="gemini-2.0-flash", # or another Gemini model name
|
| 131 |
google_api_key=google_api_key, # your Gemini API key
|
| 132 |
temperature=0,
|
| 133 |
)
|
| 134 |
+
'''
|
| 135 |
|
| 136 |
|
| 137 |
+
llm_client = ChatOpenAI(model='gpt-4o',temperature=0,api_key=openai_api_key)
|
| 138 |
|
| 139 |
tavily_api_key = os.getenv("TAVILY_API_KEY")
|
| 140 |
if not tavily_api_key:
|
|
|
|
| 143 |
print(f"Using Tavily API key: {tavily_api_key[:4]}... (truncated for security)")
|
| 144 |
|
| 145 |
travily_api_search_tool = get_travily_api_search_tool(tavily_api_key)
|
| 146 |
+
#tools = [travily_api_search_tool, repl_tool, file_saver_tool,audio_transcriber_tool,wikipedia_search_tool,wikipedia_full_content_tool]
|
| 147 |
+
tools = [travily_api_search_tool, repl_tool, file_saver_tool,audio_transcriber_tool]
|
| 148 |
|
| 149 |
# Pull a predefined prompt from LangChain Hub
|
| 150 |
# "hwchase17/react-chat" is a prompt template designed for ReAct-style conversational agents.
|
|
|
|
| 169 |
IMPORTANT NOTE ON TOOL USAGE:
|
| 170 |
- If an 'Observation' from a tool does NOT directly contain the specific answer to your question, you MUST refine your query or switch to a different, more suitable tool (e.g., 'tavily_search' for broader or more current information if 'wikipedia_search_tool' was insufficient). Do NOT get stuck repeatedly using the same tool if it's not yielding the direct answer.
|
| 171 |
- If the input contains the exact phrase "Attachment '{{file_name}}' available at: {{attachment_url}}" (where '{{file_name}}' and '{{attachment_url}}' are placeholders for actual values), consider the file type:
|
| 172 |
+
- If the file type is binary/text (e.g., .xlsx, .docx, .mp3, .jpg, .pdf, .png), you MUST use the 'file_saver' tool to download and save it.
|
| 173 |
For 'file_saver', the Action Input must be a JSON string like: '{{"url": "the_attachment_url", "local_filename": "the_file_name_from_attachment"}}'
|
| 174 |
Example: If the attachment is 'Homework.mp3' at 'https://agents-course-unit4-scoring.hf.space/files/121898981', Action Input for file_saver would be '{{"url": "https://agents-course-unit4-scoring.hf.space/files/121898981", "local_filename": "Homework.mp3"}}'
|
| 175 |
|
| 176 |
IMPORTANT: When processing audio files (like .mp3) that have been saved using 'file_saver', the 'audio_transcriber_tool' MUST be used with the 'local_filename' of the saved audio file as its Action Input. Do NOT pass URLs or remote paths directly to 'audio_transcriber_tool'.
|
| 177 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 178 |
If you have sufficient information and can provide a CONCISE response, or if no tool is needed, you MUST use this precise format:
|
| 179 |
If you can use an LLM to answer the question, think step-by-step and then answer the question.
|
| 180 |
Example: given a chess board image and asked to predict the next best move, if Multi-modal LLM is available, you can use it to answer the question.
|
|
|
|
| 219 |
# Initialize gemini model with streaming enabled
|
| 220 |
# Streaming allows tokens to be processed in real-time, reducing response latency.
|
| 221 |
#NMODEL
|
| 222 |
+
'''
|
| 223 |
summary_llm = ChatGoogleGenerativeAI(
|
| 224 |
model="gemini-2.0-flash", # or another Gemini model name
|
| 225 |
google_api_key=google_api_key, # your Gemini API key
|
| 226 |
temperature=0,
|
| 227 |
streaming=True
|
| 228 |
)
|
| 229 |
+
'''
|
| 230 |
|
| 231 |
|
| 232 |
+
summary_llm = ChatOpenAI(model='gpt-4o', temperature=0, streaming=True,api_key=openai_api_key)
|
| 233 |
|
| 234 |
# Create a ReAct agent
|
| 235 |
# The agent will reason and take actions based on retrieved tools and memory.
|