TarSh8654 committed on
Commit
18327d7
·
verified ·
1 Parent(s): f0e4e8b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +105 -12
app.py CHANGED
@@ -1,30 +1,123 @@
1
  # app.py
2
- from flask import Flask, request, jsonify
3
  import requests
4
  import json
5
- import asyncio # Import asyncio for running async functions
 
6
 
7
  app = Flask(__name__)
8
 
9
- # (Include your generate_solution_python function here,
10
- # making sure to adjust the API endpoint for google_search if necessary)
 
 
 
11
 
12
- # Assuming generate_solution_python is defined as in your Canvas
13
- # You'll need to make sure the requests.post for google_search points to a valid endpoint
14
- # In a real Hugging Face Space, you might not have a direct /api/google_search.
15
- # You'd need to consider how to integrate a search API (e.g., Google Custom Search API, SerpAPI)
16
- # and handle its API key securely.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
  @app.route('/generate', methods=['POST'])
19
  async def generate():
 
20
  data = request.json
21
  user_query = data.get('query')
22
  if not user_query:
23
  return jsonify({"error": "Query is required"}), 400
24
 
25
  # Run the async function
26
- response_text = await generate_solution_python(user_query)
27
- return jsonify({"solution": response_text})
28
 
29
  if __name__ == '__main__':
30
- app.run(host='0.0.0.0', port=7860) # Hugging Face Spaces typically use port 7860
 
 
1
  # app.py
2
+ from flask import Flask, request, jsonify, render_template
3
  import requests
4
  import json
5
+ import asyncio
6
+ import os # Import os to access environment variables
7
 
8
  app = Flask(__name__)
9
 
10
# --- Your AI Tool Logic (Modified for Hugging Face) ---
async def generate_solution_python(user_query):
    """Generate a solution for *user_query* using a dummy search context and the Gemini LLM.

    Args:
        user_query (str): The query provided by the user. Falsy values
            (empty string, None) short-circuit to a prompt message.

    Returns:
        str: The generated solution text, or a human-readable error message
        when the network call, configuration, or response parsing fails.
    """
    if not user_query:
        return "Please enter your query to get a solution."

    print(f"Processing query: {user_query}")
    response_text = ""

    try:
        # --- IMPORTANT: Placeholder for Search API Integration ---
        # The 'google_search' tool is specific to the Canvas environment.
        # On Hugging Face, integrate a real public search API here
        # (e.g. Google Custom Search API, SerpAPI) and extract snippets.
        # For this example, we use a dummy context string instead.
        context = f"Information related to '{user_query}' from various online sources indicates that..."

        # Step 2: Construct prompt for LLM with context
        prompt = f"""You are an AI assistant that provides comprehensive solutions based on the given query and additional context from open sources.

User Query: {user_query}

Relevant Open-Source Information:
{context}

Please provide a detailed and helpful solution, incorporating the provided information where relevant. If the information is insufficient, state that and provide a general answer.
"""
        chat_history = [{"role": "user", "parts": [{"text": prompt}]}]

        # Step 3: Call Gemini API
        print("Calling Gemini API...")
        llm_payload = {"contents": chat_history}

        # Get API key from environment variables (Hugging Face Space Secrets).
        gemini_api_key = os.environ.get("GEMINI_API_KEY")
        if not gemini_api_key:
            raise ValueError("GEMINI_API_KEY environment variable not set.")

        # SECURITY: pass the key via the documented x-goog-api-key header
        # rather than the ?key= query string, so it never appears in URLs
        # (proxies and server logs commonly record full request URLs).
        gemini_api_url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"

        # BUG FIX: requests.post is a blocking call; invoking it directly in
        # an `async def` freezes the event loop for the whole request
        # duration. Off-load it to a worker thread via asyncio.to_thread.
        gemini_response = await asyncio.to_thread(
            requests.post,
            gemini_api_url,
            headers={
                'Content-Type': 'application/json',
                'x-goog-api-key': gemini_api_key,
            },
            data=json.dumps(llm_payload),
        )

        gemini_response.raise_for_status()  # Raise an exception for HTTP errors
        llm_result = gemini_response.json()
        print("Gemini API response received.")

        # Defensively walk the expected shape:
        # candidates[0].content.parts[0].text
        candidates = llm_result.get('candidates') or []
        if candidates:
            content = candidates[0].get('content') or {}
            parts = content.get('parts') or []
            if parts:
                response_text = parts[0].get('text', "")
        if not response_text:
            response_text = "No solution could be generated. Please try a different query."

    except requests.exceptions.RequestException as e:
        error_message = f"Network or API error: {e}"
        print(f"Error: {error_message}")
        response_text = f"An error occurred: {error_message}. Please check the logs for details."
    except ValueError as e:
        error_message = f"Configuration error: {e}"
        print(f"Error: {error_message}")
        response_text = f"An error occurred: {error_message}. Please check the logs for details."
    except Exception as e:
        error_message = f"An unexpected error occurred: {e}"
        print(f"Error: {error_message}")
        response_text = f"An error occurred: {error_message}. Please check the logs for details."

    return response_text
101
+
102
+ # --- Flask Routes ---
103
+
104
@app.route('/')
def index():
    """Serve the application's main HTML page."""
    page = render_template('index.html')
    return page
108
 
109
@app.route('/generate', methods=['POST'])
async def generate():
    """Handle a POST /generate request.

    Expects a JSON body of the form {"query": "..."}; returns
    {"solution": "..."} on success, or {"error": "..."} with HTTP 400
    when the query is missing.
    """
    # ROBUSTNESS: request.json errors out (415/400) for absent or non-JSON
    # bodies before we can produce our own message; get_json(silent=True)
    # returns None instead, which we normalize to an empty dict.
    data = request.get_json(silent=True) or {}
    user_query = data.get('query')
    if not user_query:
        return jsonify({"error": "Query is required"}), 400

    # generate_solution_python is a coroutine, so await it here.
    solution = await generate_solution_python(user_query)
    return jsonify({"solution": solution})
120
 
121
if __name__ == '__main__':
    # Hugging Face Spaces expose the app on port 7860 by default;
    # bind to all interfaces so the container's proxy can reach it.
    app.run(host="0.0.0.0", port=7860)