Pepguy committed on
Commit
93d0dca
·
verified ·
1 Parent(s): 3ad0f78

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -32
app.py CHANGED
@@ -20,13 +20,8 @@ MAX_HISTORY_TURNS = 10 # Maximum conversation turns to keep in context
20
  client = genai.Client(api_key=GEMINI_KEY)
21
  user_memory = {} # { user_id: { "history": [], "last_sync": timestamp } }
22
 
23
- # --- Helper: sync single user's history (added) ---
24
  def sync_user(uid):
25
- """
26
- Attempt to sync a single user's most recent history to LAMBDA_URL.
27
- Updates user_memory[uid]['last_sync'] on success and logs results.
28
- This runs in its own thread when called from update_user_history and in the flush loop.
29
- """
30
  try:
31
  data = user_memory.get(uid)
32
  if not data:
@@ -40,18 +35,15 @@ def sync_user(uid):
40
  except Exception as e:
41
  app.logger.warning(f"Failed sync for {uid}: {e}")
42
 
43
- # --- Background thread for periodic flush (uses helper) ---
44
def flush_loop():
    """Background loop: every 5 seconds, sync any user whose history is stale.

    A user is stale when at least FLUSH_INTERVAL seconds have passed since
    their last successful sync and they have pending history entries.
    """
    while True:
        current_time = time.time()
        # Snapshot the items so concurrent inserts don't break iteration.
        for user_id, record in list(user_memory.items()):
            is_stale = current_time - record.get("last_sync", 0) >= FLUSH_INTERVAL
            if is_stale and record.get("history"):
                # Offload the network call so this scan loop never blocks.
                worker = threading.Thread(target=sync_user, args=(user_id,), daemon=True)
                worker.start()
        time.sleep(5)
52
 
53
- # NOTE: don't start the thread at module import time to avoid issues with Flask reloader.
54
- # We'll start it once when the app starts serving requests.
55
def start_flush_thread_once():
    """Start the background flush thread, guaranteeing at most one thread.

    The original body had no guard despite the "_once" name: every call
    spawned a fresh flush_loop thread, so wiring it to a per-request hook
    would leak one thread per request. A function attribute makes repeat
    calls no-ops while keeping the signature unchanged.
    """
    if getattr(start_flush_thread_once, "_started", False):
        return  # already running; don't spawn a duplicate flush loop
    start_flush_thread_once._started = True
    t = threading.Thread(target=flush_loop, daemon=True)
    t.start()
@@ -110,29 +102,21 @@ HTML = """
110
  # --- Gemini Generation with History ---
111
  def generate_from_gemini(prompt, image_bytes=None, history=None):
112
  start_time = time.time()
113
-
114
- # Build contents list with history
115
  contents = []
116
-
117
- # Add historical messages (limit to recent turns to avoid token limits)
118
  if history:
119
  recent_history = history[-MAX_HISTORY_TURNS:]
120
  for entry in recent_history:
121
- # Add user message
122
  user_parts = [types.Part.from_text(text=entry["prompt"])]
123
  contents.append(types.Content(role="user", parts=user_parts))
124
-
125
- # Add model response
126
  model_parts = [types.Part.from_text(text=entry["response"])]
127
  contents.append(types.Content(role="model", parts=model_parts))
128
 
129
- # Add current user message
130
  current_parts = []
131
  if prompt:
132
  current_parts.append(types.Part.from_text(text=prompt))
133
  if image_bytes:
134
  current_parts.append(types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg"))
135
-
136
  contents.append(types.Content(role="user", parts=current_parts))
137
 
138
  cfg = types.GenerateContentConfig(response_mime_type="text/plain")
@@ -160,7 +144,6 @@ def get_user_history(uid):
160
  resp = requests.get(f"{LAMBDA_URL}?user_id={uid}", timeout=5)
161
  resp.raise_for_status()
162
  loaded_history = resp.json().get("history", [])
163
- # Only keep the most recent MAX_HISTORY_TURNS when loading
164
  user_memory[uid] = {
165
  "history": loaded_history[-MAX_HISTORY_TURNS:],
166
  "last_sync": 0
@@ -174,13 +157,9 @@ def get_user_history(uid):
174
def update_user_history(uid, prompt, response):
    """Record one prompt/response exchange for a user and sync it asynchronously.

    Appends the new entry, trims the in-memory history to MAX_HISTORY_TURNS
    so it cannot grow without bound, and fires a short-lived daemon thread
    to push the history to the backing store immediately.
    """
    record = user_memory.setdefault(uid, {"history": [], "last_sync": 0})
    record["history"].append(
        {"prompt": prompt, "response": response, "timestamp": time.time()}
    )
    # Keep only the most recent turns.
    if len(record["history"]) > MAX_HISTORY_TURNS:
        record["history"] = record["history"][-MAX_HISTORY_TURNS:]
        app.logger.debug(f"Trimmed history for {uid} to {MAX_HISTORY_TURNS} turns")
    # Async sync so the request path is never blocked on the network.
    threading.Thread(target=sync_user, args=(uid,), daemon=True).start()
185
 
186
  # --- Routes ---
@@ -201,22 +180,16 @@ def gen():
201
  return jsonify({"error": "No prompt or image provided"}), 400
202
 
203
  try:
204
- # Load user's conversation history
205
  history = get_user_history(uid)
206
-
207
- # Generate response with history context
208
  result = generate_from_gemini(prompt, img_bytes, history=history)
209
-
210
- # Update history with new exchange
211
  update_user_history(uid, prompt, result["text"])
212
-
213
  return jsonify({"result": result["text"], "timing": result["timing"]})
214
  except Exception as e:
215
  app.logger.exception("Generation failed")
216
  return jsonify({"error": str(e)}), 500
217
 
218
- # Ensure background thread starts when app begins serving requests
219
- @app.before_first_request
220
  def _start_background_tasks():
221
  start_flush_thread_once()
222
 
 
20
  client = genai.Client(api_key=GEMINI_KEY)
21
  user_memory = {} # { user_id: { "history": [], "last_sync": timestamp } }
22
 
23
+ # --- Helper: sync single user's history ---
24
  def sync_user(uid):
 
 
 
 
 
25
  try:
26
  data = user_memory.get(uid)
27
  if not data:
 
35
  except Exception as e:
36
  app.logger.warning(f"Failed sync for {uid}: {e}")
37
 
38
+ # --- Background thread for periodic flush ---
39
  def flush_loop():
40
  while True:
41
  now = time.time()
42
  for uid, data in list(user_memory.items()):
43
  if now - data.get("last_sync", 0) >= FLUSH_INTERVAL and data.get("history"):
 
44
  threading.Thread(target=sync_user, args=(uid,), daemon=True).start()
45
  time.sleep(5)
46
 
 
 
47
  def start_flush_thread_once():
48
  t = threading.Thread(target=flush_loop, daemon=True)
49
  t.start()
 
102
  # --- Gemini Generation with History ---
103
  def generate_from_gemini(prompt, image_bytes=None, history=None):
104
  start_time = time.time()
 
 
105
  contents = []
106
+
 
107
  if history:
108
  recent_history = history[-MAX_HISTORY_TURNS:]
109
  for entry in recent_history:
 
110
  user_parts = [types.Part.from_text(text=entry["prompt"])]
111
  contents.append(types.Content(role="user", parts=user_parts))
 
 
112
  model_parts = [types.Part.from_text(text=entry["response"])]
113
  contents.append(types.Content(role="model", parts=model_parts))
114
 
 
115
  current_parts = []
116
  if prompt:
117
  current_parts.append(types.Part.from_text(text=prompt))
118
  if image_bytes:
119
  current_parts.append(types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg"))
 
120
  contents.append(types.Content(role="user", parts=current_parts))
121
 
122
  cfg = types.GenerateContentConfig(response_mime_type="text/plain")
 
144
  resp = requests.get(f"{LAMBDA_URL}?user_id={uid}", timeout=5)
145
  resp.raise_for_status()
146
  loaded_history = resp.json().get("history", [])
 
147
  user_memory[uid] = {
148
  "history": loaded_history[-MAX_HISTORY_TURNS:],
149
  "last_sync": 0
 
157
  def update_user_history(uid, prompt, response):
158
  entry = {"prompt": prompt, "response": response, "timestamp": time.time()}
159
  user_memory.setdefault(uid, {"history": [], "last_sync": 0})["history"].append(entry)
 
 
160
  if len(user_memory[uid]["history"]) > MAX_HISTORY_TURNS:
161
  user_memory[uid]["history"] = user_memory[uid]["history"][-MAX_HISTORY_TURNS:]
162
  app.logger.debug(f"Trimmed history for {uid} to {MAX_HISTORY_TURNS} turns")
 
 
163
  threading.Thread(target=sync_user, args=(uid,), daemon=True).start()
164
 
165
  # --- Routes ---
 
180
  return jsonify({"error": "No prompt or image provided"}), 400
181
 
182
  try:
 
183
  history = get_user_history(uid)
 
 
184
  result = generate_from_gemini(prompt, img_bytes, history=history)
 
 
185
  update_user_history(uid, prompt, result["text"])
 
186
  return jsonify({"result": result["text"], "timing": result["timing"]})
187
  except Exception as e:
188
  app.logger.exception("Generation failed")
189
  return jsonify({"error": str(e)}), 500
190
 
191
# Flask 3.x removed @app.before_first_request, and plain Flask has no
# before_serving hook (that is a Quart API) — `@app.before_serving` raises
# AttributeError at import time. Use a self-disarming before_request hook so
# the flush thread starts exactly once, on the first request served.
_background_started = False

@app.before_request
def _start_background_tasks():
    """Start the periodic flush thread the first time any request arrives."""
    global _background_started
    if not _background_started:
        _background_started = True
        start_flush_thread_once()
195