File size: 4,472 Bytes
4777810
 
 
 
 
f1fe838
4777810
 
 
 
 
 
f1fe838
4777810
 
 
 
 
 
 
f1fe838
4777810
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f1fe838
4777810
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import requests
import json


def show_message(title, content):
    """Print *content* framed by a titled header line and a divider footer."""
    header = f"\n--- {title} ---"
    footer = "-----------------\n"
    for piece in (header, content, footer):
        print(piece)


def set_processing_state(is_processing):
    """Announce whether a long-running operation has started or finished."""
    message = "Processing... Please wait." if is_processing else "Processing complete."
    print(message)


def _build_search_context(search_result, max_len=2000):
    """Flatten search-result snippets into a single prompt-context string.

    Walks each per-query result list in *search_result*, appending
    ``[Source N] <snippet>`` lines. Once the accumulated text exceeds
    *max_len* characters, an ellipsis marker is appended and collection
    stops, keeping the LLM prompt bounded.

    Args:
        search_result: Parsed JSON from the search endpoint; expected to
            hold ``{'results': [{'results': [{'snippet': str}, ...]}, ...]}``.
        max_len: Soft character cap before truncation (default 2000).

    Returns:
        The assembled context string (possibly empty).
    """
    context = ""
    for query_result in search_result.get('results') or []:
        for item_index, item in enumerate(query_result.get('results') or []):
            snippet = item.get('snippet')
            if not snippet:
                continue
            context += f"[Source {item_index + 1}] {snippet}\n"
            if len(context) > max_len:
                # Signal truncation to the model and stop collecting.
                context += "...\n"
                return context
    return context


def _extract_llm_text(llm_result):
    """Return the first candidate's text from a Gemini response, or None.

    EAFP replacement for the long ``.get(...)``/``len(...)`` guard chain:
    any missing key, empty list, or non-dict level yields None.
    """
    try:
        return llm_result['candidates'][0]['content']['parts'][0]['text']
    except (KeyError, IndexError, TypeError):
        return None


async def generate_solution_python(user_query):
    """Search for *user_query* context, then ask Gemini for a grounded answer.

    Flow: (1) POST the query to the local ``/api/google_search`` endpoint,
    (2) fold the returned snippets into a prompt, (3) POST the prompt to the
    Gemini ``generateContent`` API, and (4) print the resulting solution.
    Errors are reported via show_message and folded into the printed output.

    NOTE(review): declared ``async`` but contains no ``await`` — all
    ``requests`` calls block the event loop. Consider ``aiohttp`` or
    ``asyncio.to_thread``; left as-is to keep the caller-facing interface.

    Args:
        user_query: The user's question; empty/falsy input short-circuits
            with an "Input Required" message.

    Returns:
        None. Output is delivered via ``print``/``show_message``.
    """
    if not user_query:
        show_message("Input Required", "Please enter your query to get a solution.")
        return

    set_processing_state(True)
    response_text = ""

    try:
        # Step 1: Use google_search to get relevant information.
        print(f"Searching for information related to: {user_query}")
        search_payload = {
            "queries": [user_query]
        }
        # The '/api/google_search' endpoint is provided by the environment.
        search_response = requests.post(
            'http://localhost:8000/api/google_search',
            # Placeholder URL, replace with actual endpoint if running outside Canvas
            headers={'Content-Type': 'application/json'},
            data=json.dumps(search_payload)
        )
        search_response.raise_for_status()  # Raise an exception for HTTP errors
        search_result = search_response.json()
        print("Search results received.")

        context = _build_search_context(search_result)

        # Step 2: Construct prompt for LLM with search context.
        prompt = f"""You are an AI assistant that provides comprehensive solutions based on the given query and additional context from open sources.

User Query: {user_query}

{context if context else 'No specific open-source information found for this query.'}

Please provide a detailed and helpful solution, incorporating the provided information where relevant. If the information is insufficient, state that and provide a general answer.
"""
        chat_history = [{"role": "user", "parts": [{"text": prompt}]}]

        # Step 3: Call Gemini API.
        print("Calling Gemini API...")
        llm_payload = {
            "contents": chat_history
        }

        # NOTE(review): URL ends in '?key=' with no API key appended — the
        # call will be rejected unless a key is supplied here.
        gemini_api_url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key="

        gemini_response = requests.post(
            gemini_api_url,
            headers={'Content-Type': 'application/json'},
            data=json.dumps(llm_payload)
        )
        gemini_response.raise_for_status()  # Raise an exception for HTTP errors
        llm_result = gemini_response.json()
        print("Gemini API response received.")

        candidate_text = _extract_llm_text(llm_result)
        if candidate_text is not None:
            response_text = candidate_text
        else:
            response_text = "No solution could be generated. Please try a different query."

    except Exception as e:
        # Single handler replaces two previously-duplicated except arms;
        # only the message prefix differs by exception type.
        if isinstance(e, requests.exceptions.RequestException):
            error_message = f"Network or API error: {e}"
        else:
            error_message = f"An unexpected error occurred: {e}"
        print(f"Error: {error_message}")
        show_message("Generation Error", error_message)
        response_text = f"An error occurred: {error_message}. Please check the console for details."
    finally:
        set_processing_state(False)
        print("\n--- Solution ---")
        print(response_text)
        print("----------------\n")