TarSh8654 committed on
Commit
f1fe838
·
verified ·
1 Parent(s): 9d07e1c

Update AI_tool.py

Browse files
Files changed (1) hide show
  1. AI_tool.py +4 -24
AI_tool.py CHANGED
@@ -3,20 +3,14 @@ import json
3
 
4
 
5
  def show_message(title, content):
6
- """
7
- A simple function to simulate showing a message.
8
- In a real application, this would be a UI element or logging.
9
- """
10
  print(f"\n--- {title} ---")
11
  print(content)
12
  print("-----------------\n")
13
 
14
 
15
  def set_processing_state(is_processing):
16
- """
17
- Simulates enabling/disabling UI elements during processing.
18
- In a real application, this would update a GUI or web interface.
19
- """
20
  if is_processing:
21
  print("Processing... Please wait.")
22
  else:
@@ -24,12 +18,7 @@ def set_processing_state(is_processing):
24
 
25
 
26
  async def generate_solution_python(user_query):
27
- """
28
- Generates a solution using Google Search for context and Gemini LLM.
29
-
30
- Args:
31
- user_query (str): The query provided by the user.
32
- """
33
  if not user_query:
34
  show_message("Input Required", "Please enter your query to get a solution.")
35
  return
@@ -87,8 +76,7 @@ Please provide a detailed and helpful solution, incorporating the provided infor
87
  llm_payload = {
88
  "contents": chat_history
89
  }
90
- # API key is automatically provided by the Canvas environment for gemini-2.0-flash
91
- # If running outside Canvas, you would need to provide your API key here.
92
  gemini_api_url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key="
93
 
94
  gemini_response = requests.post(
@@ -124,11 +112,3 @@ Please provide a detailed and helpful solution, incorporating the provided infor
124
  print(response_text)
125
  print("----------------\n")
126
 
127
- # Example usage (how you would call this function)
128
- # To run this, you would need an environment that provides the /api/google_search endpoint
129
- # and handles the Gemini API key. In the Canvas environment, these are typically
130
- # available when running Python code that interacts with the tools.
131
-
132
- # You can test this in a Python environment by calling the function:
133
- # import asyncio
134
- # asyncio.run(generate_solution_python("What are the benefits of renewable energy and what are some recent innovations in solar power?"))
 
3
 
4
 
5
def show_message(title, content):
    """Display a titled message on stdout.

    Stand-in for a real UI element or logger: prints the title
    between dashed rules, then the content, then a closing rule.

    Args:
        title: Heading text shown inside the dashed rule.
        content: Message body printed beneath the title.
    """
    print(f"\n--- {title} ---")
    print(content)
    print("-----------------\n")
10
 
11
 
12
  def set_processing_state(is_processing):
13
+
 
 
 
14
  if is_processing:
15
  print("Processing... Please wait.")
16
  else:
 
18
 
19
 
20
  async def generate_solution_python(user_query):
21
+
 
 
 
 
 
22
  if not user_query:
23
  show_message("Input Required", "Please enter your query to get a solution.")
24
  return
 
76
  llm_payload = {
77
  "contents": chat_history
78
  }
79
+
 
80
  gemini_api_url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key="
81
 
82
  gemini_response = requests.post(
 
112
  print(response_text)
113
  print("----------------\n")
114