TarSh8654 committed on
Commit
cf7c52d
·
verified ·
1 Parent(s): 3c6a0df

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -44
app.py CHANGED
@@ -3,25 +3,31 @@ from flask import Flask, request, jsonify, render_template
3
  import requests
4
  import json
5
  import asyncio
6
- import os # Import os to access environment variables
 
7
 
8
  app = Flask(__name__)
9
 
10
- # --- Your AI Tool Logic (Modified for Hugging Face) ---
11
- async def generate_solution_python(user_query):
 
 
 
 
12
  """
13
  Generates a solution using a dummy context (since google_search is not available)
14
- and Gemini LLM.
15
 
16
  Args:
17
- user_query (str): The query provided by the user.
 
18
  Returns:
19
  str: The generated solution text or an error message.
20
  """
21
- if not user_query:
22
- return "Error: Query is required."
23
 
24
- print(f"Processing query: {user_query}")
25
  response_text = ""
26
 
27
  try:
@@ -29,39 +35,25 @@ async def generate_solution_python(user_query):
29
  # The 'google_search' tool is specific to the Canvas environment.
30
  # On Hugging Face, you would integrate a real public search API here,
31
  # e.g., Google Custom Search API, SerpAPI, or a web scraping library.
32
- # For this example, we'll use a dummy context.
33
- dummy_context = f"Information related to '{user_query}' from various online sources indicates that..."
34
-
35
- # In a real scenario, you'd make an API call like this (example with a hypothetical search API):
36
- # search_api_key = os.environ.get("YOUR_SEARCH_API_KEY")
37
- # if not search_api_key:
38
- # raise ValueError("YOUR_SEARCH_API_KEY environment variable not set.")
39
- # search_api_url = "https://api.example.com/search"
40
- # search_response = requests.get(search_api_url, params={"q": user_query, "api_key": search_api_key})
41
- # search_response.raise_for_status()
42
- # search_results = search_response.json()
43
- # context = process_search_results(search_results) # A function to extract snippets
44
-
45
- context = dummy_context # Using dummy context for now
46
-
47
- # Step 2: Construct prompt for LLM with context
48
- chat_history = []
49
- prompt = f"""You are an AI assistant that provides comprehensive solutions based on the given query and additional context from open sources.
50
 
51
- User Query: {user_query}
 
 
 
 
 
52
 
53
- Relevant Open-Source Information:
54
- {context}
55
-
56
- Please provide a detailed and helpful solution, incorporating the provided information where relevant. If the information is insufficient, state that and provide a general answer.
57
- """
58
-
59
- chat_history.append({"role": "user", "parts": [{"text": prompt}]})
60
 
61
- # Step 3: Call Gemini API
62
- print("Calling Gemini API...")
63
  llm_payload = {
64
- "contents": chat_history
65
  }
66
 
67
  # Get API key from environment variables (Hugging Face Space Secrets)
@@ -112,26 +104,42 @@ def index():
112
 
113
  @app.route('/generate', methods=['POST'])
114
  async def generate():
115
- """Handles the AI generation request."""
116
  try:
117
- # Try to parse JSON from the request body
118
  data = request.get_json()
119
  if not data:
120
  return jsonify({"error": "Request body must be JSON"}), 400
121
 
122
  user_query = data.get('query')
 
 
123
  if not user_query:
124
  return jsonify({"error": "Query is required in the request body"}), 400
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
126
- # Run the async function
127
- solution = await generate_solution_python(user_query)
128
- return jsonify({"solution": solution})
129
 
130
  except Exception as e:
131
- # Catch any unexpected errors during request processing or function call
132
  print(f"Error in /generate endpoint: {e}")
133
  return jsonify({"error": f"Internal server error: {e}"}), 500
134
 
135
  if __name__ == '__main__':
136
- # Hugging Face Spaces typically expect the app to run on port 7860
137
  app.run(host='0.0.0.0', port=7860)
 
3
  import requests
4
  import json
5
  import asyncio
6
+ import os
7
+ import uuid # For generating unique session IDs if not provided
8
 
9
  app = Flask(__name__)
10
 
11
+ # In-memory storage for conversation histories
12
+ # This will reset if the Flask application restarts.
13
+ # For persistent history, a database (like Firestore) is required.
14
+ conversation_histories = {}
15
+
16
+ async def generate_solution_python(chat_history):
17
  """
18
  Generates a solution using a dummy context (since google_search is not available)
19
+ and Gemini LLM, based on the provided chat history.
20
 
21
  Args:
22
+ chat_history (list): A list of message objects representing the conversation.
23
+ Each object has "role" and "parts" (e.g., [{"text": "..."}]).
24
  Returns:
25
  str: The generated solution text or an error message.
26
  """
27
+ if not chat_history:
28
+ return "Error: Chat history is empty."
29
 
30
+ print(f"Processing chat history length: {len(chat_history)}")
31
  response_text = ""
32
 
33
  try:
 
35
  # The 'google_search' tool is specific to the Canvas environment.
36
  # On Hugging Face, you would integrate a real public search API here,
37
  # e.g., Google Custom Search API, SerpAPI, or a web scraping library.
38
+ # For this example, we'll use a dummy context based on the latest user query.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
+ # Find the latest user query to generate a relevant dummy context
41
+ latest_user_query = ""
42
+ for message in reversed(chat_history):
43
+ if message["role"] == "user" and message["parts"] and message["parts"][0].get("text"):
44
+ latest_user_query = message["parts"][0]["text"]
45
+ break
46
 
47
+ dummy_context = f"Information related to '{latest_user_query}' from various online sources indicates that..."
48
+
49
+ # You could also inject this context into the chat_history as a system message
50
+ # or prepend it to the latest user message's text if you want the LLM to explicitly
51
+ # see it as part of the conversation flow. For now, it's implicitly part of the prompt.
 
 
52
 
53
+ # Step 2: Call Gemini API with the full chat history
54
+ print("Calling Gemini API with full chat history...")
55
  llm_payload = {
56
+ "contents": chat_history # Pass the entire history
57
  }
58
 
59
  # Get API key from environment variables (Hugging Face Space Secrets)
 
104
 
105
@app.route('/generate', methods=['POST'])
async def generate():
    """Handle a generation request, threading it through per-session history.

    Expects a JSON body with a required "query" and an optional
    "session_id". Appends the user turn and the model turn to the
    session's history in `conversation_histories` and returns the
    generated solution together with the session id.
    """
    try:
        payload = request.get_json()
        if not payload:
            return jsonify({"error": "Request body must be JSON"}), 400

        user_query = payload.get('query')
        if not user_query:
            return jsonify({"error": "Query is required in the request body"}), 400

        session_id = payload.get('session_id')
        if not session_id:
            # The frontend is expected to supply a session id; fall back to a
            # fresh UUID so the request still succeeds (the history just will
            # not be shared with any other call).
            session_id = str(uuid.uuid4())
            print(f"Warning: session_id not provided, generated new one: {session_id}")

        # NOTE(review): conversation_histories grows without bound (one entry
        # per session, never evicted) — acceptable for a demo Space, but worth
        # a TTL/eviction policy for anything long-running.
        history = conversation_histories.get(session_id, [])
        history.append({"role": "user", "parts": [{"text": user_query}]})

        solution_text = await generate_solution_python(history)

        # Record the model turn and persist the updated history for the session.
        history.append({"role": "model", "parts": [{"text": solution_text}]})
        conversation_histories[session_id] = history

        return jsonify({"solution": solution_text, "session_id": session_id})

    except Exception as e:
        # Top-level boundary: log and return a 500.
        # NOTE(review): echoing the exception text to the client may leak
        # internal details — consider a generic message; kept as-is here.
        print(f"Error in /generate endpoint: {e}")
        return jsonify({"error": f"Internal server error: {e}"}), 500
143
 
144
if __name__ == '__main__':
    # Hugging Face Spaces route external traffic to port 7860 by default,
    # and the server must bind 0.0.0.0 to be reachable from the container edge.
    app.run(host='0.0.0.0', port=7860)