Pepguy commited on
Commit
da556ba
·
verified ·
1 Parent(s): 93d0dca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -32
app.py CHANGED
@@ -20,34 +20,28 @@ MAX_HISTORY_TURNS = 10 # Maximum conversation turns to keep in context
20
  client = genai.Client(api_key=GEMINI_KEY)
21
  user_memory = {} # { user_id: { "history": [], "last_sync": timestamp } }
22
 
23
- # --- Helper: sync single user's history ---
24
- def sync_user(uid):
25
- try:
26
- data = user_memory.get(uid)
27
- if not data:
28
- return
29
- history_to_sync = data["history"][-MAX_HISTORY_TURNS:]
30
- payload = {"user_id": uid, "history": history_to_sync}
31
- resp = requests.post(LAMBDA_URL, json=payload, timeout=5)
32
- resp.raise_for_status()
33
- user_memory[uid]["last_sync"] = time.time()
34
- app.logger.info(f"Synced memory for {uid} ({len(history_to_sync)} turns)")
35
- except Exception as e:
36
- app.logger.warning(f"Failed sync for {uid}: {e}")
37
-
38
  # --- Background thread for periodic flush ---
39
  def flush_loop():
40
  while True:
 
41
  now = time.time()
42
  for uid, data in list(user_memory.items()):
43
- if now - data.get("last_sync", 0) >= FLUSH_INTERVAL and data.get("history"):
44
- threading.Thread(target=sync_user, args=(uid,), daemon=True).start()
45
- time.sleep(5)
46
-
47
- def start_flush_thread_once():
48
- t = threading.Thread(target=flush_loop, daemon=True)
49
- t.start()
50
- app.logger.info("Started flush background thread")
 
 
 
 
 
 
 
 
51
 
52
  # --- HTML Frontend ---
53
  HTML = """
@@ -102,21 +96,29 @@ HTML = """
102
  # --- Gemini Generation with History ---
103
  def generate_from_gemini(prompt, image_bytes=None, history=None):
104
  start_time = time.time()
 
 
105
  contents = []
106
-
 
107
  if history:
108
  recent_history = history[-MAX_HISTORY_TURNS:]
109
  for entry in recent_history:
 
110
  user_parts = [types.Part.from_text(text=entry["prompt"])]
111
  contents.append(types.Content(role="user", parts=user_parts))
 
 
112
  model_parts = [types.Part.from_text(text=entry["response"])]
113
  contents.append(types.Content(role="model", parts=model_parts))
114
 
 
115
  current_parts = []
116
  if prompt:
117
  current_parts.append(types.Part.from_text(text=prompt))
118
  if image_bytes:
119
  current_parts.append(types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg"))
 
120
  contents.append(types.Content(role="user", parts=current_parts))
121
 
122
  cfg = types.GenerateContentConfig(response_mime_type="text/plain")
@@ -144,23 +146,28 @@ def get_user_history(uid):
144
  resp = requests.get(f"{LAMBDA_URL}?user_id={uid}", timeout=5)
145
  resp.raise_for_status()
146
  loaded_history = resp.json().get("history", [])
 
147
  user_memory[uid] = {
148
  "history": loaded_history[-MAX_HISTORY_TURNS:],
149
- "last_sync": 0
150
  }
151
  app.logger.info(f"Loaded history for {uid} ({len(user_memory[uid]['history'])} turns)")
152
  except Exception as e:
153
  app.logger.warning(f"Failed to load history for {uid}: {e}")
154
- user_memory[uid] = {"history": [], "last_sync": 0}
155
  return user_memory[uid]["history"]
156
 
157
  def update_user_history(uid, prompt, response):
158
  entry = {"prompt": prompt, "response": response, "timestamp": time.time()}
159
- user_memory.setdefault(uid, {"history": [], "last_sync": 0})["history"].append(entry)
 
 
 
 
 
160
  if len(user_memory[uid]["history"]) > MAX_HISTORY_TURNS:
161
  user_memory[uid]["history"] = user_memory[uid]["history"][-MAX_HISTORY_TURNS:]
162
  app.logger.debug(f"Trimmed history for {uid} to {MAX_HISTORY_TURNS} turns")
163
- threading.Thread(target=sync_user, args=(uid,), daemon=True).start()
164
 
165
  # --- Routes ---
166
  @app.route("/")
@@ -180,18 +187,20 @@ def gen():
180
  return jsonify({"error": "No prompt or image provided"}), 400
181
 
182
  try:
 
183
  history = get_user_history(uid)
 
 
184
  result = generate_from_gemini(prompt, img_bytes, history=history)
 
 
185
  update_user_history(uid, prompt, result["text"])
 
186
  return jsonify({"result": result["text"], "timing": result["timing"]})
187
  except Exception as e:
188
  app.logger.exception("Generation failed")
189
  return jsonify({"error": str(e)}), 500
190
 
191
- # Flask 3.x compatible event
192
- @app.before_serving
193
- def _start_background_tasks():
194
- start_flush_thread_once()
195
 
196
  if __name__ == "__main__":
197
  port = int(os.getenv("PORT", 7860))
 
20
  client = genai.Client(api_key=GEMINI_KEY)
21
  user_memory = {} # { user_id: { "history": [], "last_sync": timestamp } }
22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
# --- Background thread for periodic flush ---
def flush_loop():
    """Periodically push each user's recent history to the Lambda endpoint.

    Runs forever in a daemon thread: every 5 seconds it scans ``user_memory``
    and, for each user whose last successful sync is at least FLUSH_INTERVAL
    old and whose history is non-empty, POSTs the most recent
    MAX_HISTORY_TURNS entries. Failures are logged and retried on the next
    pass; they never terminate the thread.
    """
    while True:
        time.sleep(5)
        now = time.time()
        # Snapshot items() so request handlers can mutate user_memory
        # concurrently without RuntimeError.
        for uid, data in list(user_memory.items()):
            # .get("history") keeps a malformed entry from raising KeyError
            # outside the try block, which would kill this thread for good.
            if now - data.get("last_sync", 0) >= FLUSH_INTERVAL and data.get("history"):
                try:
                    # Only sync the most recent MAX_HISTORY_TURNS entries.
                    history_to_sync = data["history"][-MAX_HISTORY_TURNS:]
                    payload = {"user_id": uid, "history": history_to_sync}
                    # Debug prints removed: the logger already records both
                    # the attempt and the outcome. Lazy %-args avoid
                    # formatting when the level is disabled.
                    app.logger.info("Attempting to sync %s to %s", uid, LAMBDA_URL)
                    resp = requests.post(LAMBDA_URL, json=payload, timeout=5)
                    resp.raise_for_status()
                    user_memory[uid]["last_sync"] = now
                    app.logger.info(
                        "Synced memory for %s (%d turns)", uid, len(history_to_sync)
                    )
                except Exception as e:
                    # Best-effort: log and retry this user on the next pass.
                    app.logger.warning("Failed sync for %s: %s", uid, e)

threading.Thread(target=flush_loop, daemon=True).start()
45
 
46
  # --- HTML Frontend ---
47
  HTML = """
 
96
  # --- Gemini Generation with History ---
97
  def generate_from_gemini(prompt, image_bytes=None, history=None):
98
  start_time = time.time()
99
+
100
+ # Build contents list with history
101
  contents = []
102
+
103
+ # Add historical messages (limit to recent turns to avoid token limits)
104
  if history:
105
  recent_history = history[-MAX_HISTORY_TURNS:]
106
  for entry in recent_history:
107
+ # Add user message
108
  user_parts = [types.Part.from_text(text=entry["prompt"])]
109
  contents.append(types.Content(role="user", parts=user_parts))
110
+
111
+ # Add model response
112
  model_parts = [types.Part.from_text(text=entry["response"])]
113
  contents.append(types.Content(role="model", parts=model_parts))
114
 
115
+ # Add current user message
116
  current_parts = []
117
  if prompt:
118
  current_parts.append(types.Part.from_text(text=prompt))
119
  if image_bytes:
120
  current_parts.append(types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg"))
121
+
122
  contents.append(types.Content(role="user", parts=current_parts))
123
 
124
  cfg = types.GenerateContentConfig(response_mime_type="text/plain")
 
146
  resp = requests.get(f"{LAMBDA_URL}?user_id={uid}", timeout=5)
147
  resp.raise_for_status()
148
  loaded_history = resp.json().get("history", [])
149
+ # Only keep the most recent MAX_HISTORY_TURNS when loading
150
  user_memory[uid] = {
151
  "history": loaded_history[-MAX_HISTORY_TURNS:],
152
+ "last_sync": time.time()
153
  }
154
  app.logger.info(f"Loaded history for {uid} ({len(user_memory[uid]['history'])} turns)")
155
  except Exception as e:
156
  app.logger.warning(f"Failed to load history for {uid}: {e}")
157
+ user_memory[uid] = {"history": [], "last_sync": time.time()}
158
  return user_memory[uid]["history"]
159
 
160
def update_user_history(uid, prompt, response):
    """Append one prompt/response exchange to a user's in-memory history.

    Creates the per-user record on first use and caps the stored history at
    MAX_HISTORY_TURNS entries so memory use stays bounded.
    """
    record = user_memory.setdefault(uid, {"history": [], "last_sync": time.time()})
    record["history"].append(
        {"prompt": prompt, "response": response, "timestamp": time.time()}
    )

    # Drop the oldest entries once the cap is exceeded.
    if len(record["history"]) > MAX_HISTORY_TURNS:
        record["history"] = record["history"][-MAX_HISTORY_TURNS:]
        app.logger.debug(f"Trimmed history for {uid} to {MAX_HISTORY_TURNS} turns")
 
171
 
172
  # --- Routes ---
173
  @app.route("/")
 
187
  return jsonify({"error": "No prompt or image provided"}), 400
188
 
189
  try:
190
+ # Load user's conversation history
191
  history = get_user_history(uid)
192
+
193
+ # Generate response with history context
194
  result = generate_from_gemini(prompt, img_bytes, history=history)
195
+
196
+ # Update history with new exchange
197
  update_user_history(uid, prompt, result["text"])
198
+
199
  return jsonify({"result": result["text"], "timing": result["timing"]})
200
  except Exception as e:
201
  app.logger.exception("Generation failed")
202
  return jsonify({"error": str(e)}), 500
203
 
 
 
 
 
204
 
205
  if __name__ == "__main__":
206
  port = int(os.getenv("PORT", 7860))