TarSh8654 committed on
Commit
4777810
·
verified ·
1 Parent(s): 5135da1

Create AI_tool.py

Browse files
Files changed (1) hide show
  1. AI_tool.py +134 -0
AI_tool.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import json
3
+
4
+
5
def show_message(title, content):
    """Print a titled message block to stdout.

    Stand-in for a real UI element or logging call; the surrounding
    tool only needs somewhere to surface user-facing text.
    """
    banner = f"\n--- {title} ---"
    for piece in (banner, content, "-----------------\n"):
        print(piece)
13
+
14
+
15
def set_processing_state(is_processing):
    """Report whether work is in progress.

    Simulates enabling/disabling UI elements during processing; here it
    just prints a one-line status message.
    """
    status = "Processing... Please wait." if is_processing else "Processing complete."
    print(status)
24
+
25
+
26
async def generate_solution_python(user_query):
    """Generate a solution for *user_query* using web search context plus the Gemini LLM.

    Workflow:
      1. POST the query to a google-search endpoint to collect snippets.
      2. Build a prompt that embeds those snippets as context.
      3. Send the prompt to the Gemini ``generateContent`` endpoint.

    Args:
        user_query (str): The query provided by the user.

    Returns:
        str | None: The generated answer (or an error description on
        failure), or ``None`` when ``user_query`` is empty.

    NOTE(review): declared ``async`` for interface compatibility with
    existing callers, but it performs blocking ``requests`` calls; a true
    async client (e.g. aiohttp) would be needed for concurrency.
    """
    if not user_query:
        show_message("Input Required", "Please enter your query to get a solution.")
        return None

    set_processing_state(True)
    response_text = ""

    try:
        # Step 1: use the search endpoint to gather supporting snippets.
        print(f"Searching for information related to: {user_query}")
        # The '/api/google_search' endpoint is provided by the environment.
        # Placeholder URL, replace with actual endpoint if running outside Canvas.
        search_response = requests.post(
            'http://localhost:8000/api/google_search',
            json={"queries": [user_query]},  # requests sets the JSON header itself
            timeout=30,  # fail fast instead of hanging on a dead endpoint
        )
        search_response.raise_for_status()  # raise for HTTP errors
        search_result = search_response.json()
        print("Search results received.")

        context = _build_context(search_result)

        # Step 2: construct the LLM prompt with the search context.
        prompt = f"""You are an AI assistant that provides comprehensive solutions based on the given query and additional context from open sources.

User Query: {user_query}

{context if context else 'No specific open-source information found for this query.'}

Please provide a detailed and helpful solution, incorporating the provided information where relevant. If the information is insufficient, state that and provide a general answer.
"""
        chat_history = [{"role": "user", "parts": [{"text": prompt}]}]

        # Step 3: call the Gemini API.
        print("Calling Gemini API...")
        # API key is automatically provided by the Canvas environment for gemini-2.0-flash.
        # If running outside Canvas, you would need to provide your API key here.
        gemini_api_url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key="
        gemini_response = requests.post(
            gemini_api_url,
            json={"contents": chat_history},
            timeout=60,  # LLM responses can be slow, but never wait forever
        )
        gemini_response.raise_for_status()  # raise for HTTP errors
        llm_result = gemini_response.json()
        print("Gemini API response received.")

        response_text = _extract_answer(llm_result)

    except requests.exceptions.RequestException as e:
        error_message = f"Network or API error: {e}"
        print(f"Error: {error_message}")
        show_message("Generation Error", error_message)
        response_text = f"An error occurred: {error_message}. Please check the console for details."
    except Exception as e:
        error_message = f"An unexpected error occurred: {e}"
        print(f"Error: {error_message}")
        show_message("Generation Error", error_message)
        response_text = f"An error occurred: {error_message}. Please check the console for details."
    finally:
        set_processing_state(False)
        print("\n--- Solution ---")
        print(response_text)
        print("----------------\n")

    return response_text


def _build_context(search_result, max_len=2000):
    """Concatenate result snippets into a numbered context string.

    Stops appending once the accumulated text exceeds *max_len* characters
    (a simple guard against excessively long prompts).
    """
    context = ""
    for query_result in search_result.get('results') or []:
        for item_index, item in enumerate(query_result.get('results') or []):
            snippet = item.get('snippet')
            if snippet:
                context += f"[Source {item_index + 1}] {snippet}\n"
                if len(context) > max_len:
                    context += "...\n"  # mark the truncation point
                    break
        if len(context) > max_len:
            break
    return context


def _extract_answer(llm_result):
    """Pull the first candidate's text out of a ``generateContent`` response.

    Falls back to a user-facing message when the response carries no
    candidates/parts (EAFP instead of a long chained-``get`` condition).
    """
    try:
        return llm_result['candidates'][0]['content']['parts'][0]['text']
    except (KeyError, IndexError, TypeError):
        return "No solution could be generated. Please try a different query."
126
+
127
+ # Example usage (how you would call this function)
128
+ # To run this, you would need an environment that provides the /api/google_search endpoint
129
+ # and handles the Gemini API key. In the Canvas environment, these are typically
130
+ # available when running Python code that interacts with the tools.
131
+
132
+ # You can test this in a Python environment by calling the function:
133
+ # import asyncio
134
+ # asyncio.run(generate_solution_python("What are the benefits of renewable energy and what are some recent innovations in solar power?"))