Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -2,9 +2,8 @@
|
|
| 2 |
# pip install flask google-genai requests boto3
|
| 3 |
|
| 4 |
import os
|
| 5 |
-
import io
|
| 6 |
import time
|
| 7 |
-
import
|
| 8 |
import requests
|
| 9 |
from flask import Flask, request, render_template_string, jsonify
|
| 10 |
from google import genai
|
|
@@ -12,161 +11,144 @@ from google.genai import types
|
|
| 12 |
|
| 13 |
app = Flask(__name__)
|
| 14 |
|
| 15 |
-
#
|
| 16 |
LAMBDA_URL = os.getenv("LAMBDA_URL", "https://your-lambda-function-url")
|
| 17 |
-
|
| 18 |
-
#
|
| 19 |
-
|
| 20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
HTML = """
|
| 22 |
<!DOCTYPE html>
|
| 23 |
<html lang="en">
|
| 24 |
-
<head>
|
| 25 |
-
<meta charset="UTF-8" />
|
| 26 |
-
<title>Gemini 2.0 Flash Test</title>
|
| 27 |
-
</head>
|
| 28 |
<body style="font-family:sans-serif;padding:2rem;max-width:700px;">
|
| 29 |
-
<h1>Gemini 2.
|
| 30 |
-
|
| 31 |
-
<label><input type="checkbox" id="historyToggle"/> Enable History</label>
|
| 32 |
-
<br/><br/>
|
| 33 |
-
|
| 34 |
<form id="genai-form">
|
| 35 |
-
<
|
|
|
|
| 36 |
<input type="file" id="imageInput" accept="image/*"/><br/><br/>
|
| 37 |
<button type="submit">Generate</button>
|
| 38 |
</form>
|
| 39 |
-
|
| 40 |
<pre id="output" style="background:#f4f4f4;padding:1rem;margin-top:1rem;white-space:pre-wrap;"></pre>
|
| 41 |
|
| 42 |
<script>
|
| 43 |
const form = document.getElementById('genai-form');
|
| 44 |
-
const toggle = document.getElementById('historyToggle');
|
| 45 |
const out = document.getElementById('output');
|
| 46 |
|
| 47 |
form.addEventListener('submit', async e => {
|
| 48 |
e.preventDefault();
|
| 49 |
const prompt = document.getElementById('prompt').value.trim();
|
|
|
|
| 50 |
const fileInput = document.getElementById('imageInput');
|
| 51 |
-
const historyOn = toggle.checked;
|
| 52 |
-
if (!prompt && fileInput.files.length === 0) {
|
| 53 |
-
out.textContent = 'Please enter a prompt or attach an image.';
|
| 54 |
-
return;
|
| 55 |
-
}
|
| 56 |
|
| 57 |
-
out.textContent = '
|
|
|
|
| 58 |
|
|
|
|
| 59 |
const formData = new FormData();
|
| 60 |
formData.append("text", prompt);
|
| 61 |
-
formData.append("
|
| 62 |
-
if (fileInput.files.length > 0)
|
| 63 |
-
formData.append("image", fileInput.files[0]);
|
| 64 |
-
}
|
| 65 |
|
| 66 |
try {
|
| 67 |
-
const resp = await fetch('/generate', {
|
| 68 |
-
method: 'POST',
|
| 69 |
-
body: formData
|
| 70 |
-
});
|
| 71 |
-
|
| 72 |
const data = await resp.json();
|
| 73 |
-
if (data.error)
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
"⚙️ Model Time: " + data.timing.model_ms + " ms\\n\\n" +
|
| 79 |
-
"📜 Result:\\n" + data.result +
|
| 80 |
-
(data.history ? "\\n\\n💾 History synced" : "");
|
| 81 |
-
}
|
| 82 |
} catch (err) {
|
| 83 |
-
console.error(err);
|
| 84 |
out.textContent = 'Fetch error: ' + err.message;
|
| 85 |
}
|
| 86 |
});
|
| 87 |
</script>
|
| 88 |
-
</body>
|
| 89 |
-
</html>
|
| 90 |
"""
|
| 91 |
|
| 92 |
-
|
| 93 |
-
|
| 94 |
start_time = time.time()
|
| 95 |
parts = []
|
| 96 |
-
|
| 97 |
-
if
|
| 98 |
-
parts.append(types.Part.from_text(text=prompt))
|
| 99 |
-
if image_bytes:
|
| 100 |
-
parts.append(types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg"))
|
| 101 |
|
| 102 |
contents = [types.Content(role="user", parts=parts)]
|
| 103 |
-
|
| 104 |
|
| 105 |
model_start = time.time()
|
| 106 |
-
|
| 107 |
-
model="gemini-2.5-flash-lite",
|
| 108 |
-
#model="gemini-2.0-flash",
|
| 109 |
-
contents=contents,
|
| 110 |
-
config=config,
|
| 111 |
-
)
|
| 112 |
model_end = time.time()
|
| 113 |
|
| 114 |
-
return {
|
| 115 |
-
"
|
| 116 |
-
"
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
def index():
|
| 134 |
return render_template_string(HTML)
|
| 135 |
|
| 136 |
-
@app.route(
|
| 137 |
def gen():
|
| 138 |
-
|
| 139 |
-
|
|
|
|
| 140 |
|
|
|
|
| 141 |
image = request.files.get("image")
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
if not prompt and not image_bytes:
|
| 145 |
return jsonify({"error": "No prompt or image provided"}), 400
|
| 146 |
|
| 147 |
try:
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
}
|
| 153 |
-
|
| 154 |
-
if history_on:
|
| 155 |
-
history_payload = {
|
| 156 |
-
"prompt": prompt,
|
| 157 |
-
"response": result["text"],
|
| 158 |
-
"timestamp": time.time(),
|
| 159 |
-
}
|
| 160 |
-
history_resp = handle_history_sync(history_payload)
|
| 161 |
-
if history_resp:
|
| 162 |
-
response_json["history"] = history_resp
|
| 163 |
-
|
| 164 |
-
return jsonify(response_json)
|
| 165 |
except Exception as e:
|
| 166 |
app.logger.exception("Generation failed")
|
| 167 |
return jsonify({"error": str(e)}), 500
|
| 168 |
|
| 169 |
|
| 170 |
if __name__ == "__main__":
|
| 171 |
-
port = int(os.
|
| 172 |
app.run(host="0.0.0.0", port=port)
|
|
|
|
| 2 |
# pip install flask google-genai requests boto3
|
| 3 |
|
| 4 |
import os
|
|
|
|
| 5 |
import time
|
| 6 |
+
import threading
|
| 7 |
import requests
|
| 8 |
from flask import Flask, request, render_template_string, jsonify
|
| 9 |
from google import genai
|
|
|
|
| 11 |
|
| 12 |
app = Flask(__name__)

# --- Configuration ---
# Endpoint used both to load (GET ?user_id=...) and to persist (POST) per-user history.
LAMBDA_URL = os.getenv("LAMBDA_URL", "https://your-lambda-function-url")
# NOTE(review): empty-string default means the Gemini client is built with no key
# when GEMINI_API_KEY is unset — confirm the env var is set in deployment.
GEMINI_KEY = os.getenv("GEMINI_API_KEY", "")
FLUSH_INTERVAL = 30  # seconds between DB backups per user

client = genai.Client(api_key=GEMINI_KEY)
# In-process cache of conversation history, mutated by request handlers and
# read by the background flush thread.
user_memory = {}  # { user_id: { "history": [], "last_sync": timestamp } }
|
| 21 |
+
|
| 22 |
+
# --- Background thread for periodic flush ---
def flush_loop():
    """Periodically push each user's cached history to the Lambda endpoint.

    Runs forever in a daemon thread: every 5 seconds it scans ``user_memory``
    and, for any user with a non-empty history whose last sync is at least
    ``FLUSH_INTERVAL`` seconds old, POSTs the full history upstream.
    Failures are logged and retried on a later pass.
    """
    while True:
        now = time.time()
        # Snapshot the items so request handlers can add users while we iterate.
        for user_id, record in list(user_memory.items()):
            due = now - record.get("last_sync", 0) >= FLUSH_INTERVAL
            if not (due and record["history"]):
                continue
            try:
                requests.post(
                    LAMBDA_URL,
                    json={"user_id": user_id, "history": record["history"]},
                    timeout=5,
                )
                user_memory[user_id]["last_sync"] = now
                app.logger.info(f"Synced memory for {user_id}")
            except Exception as e:
                # Best-effort sync: keep the loop alive on any failure.
                app.logger.warning(f"Failed sync for {user_id}: {e}")
        time.sleep(5)

threading.Thread(target=flush_loop, daemon=True).start()
|
| 38 |
+
|
| 39 |
+
# --- HTML Frontend ---
# Single-page UI: user id + prompt + optional image upload; the inline script
# POSTs multipart form data to /generate and renders result + timing info.
HTML = """
<!DOCTYPE html>
<html lang="en">
<head><meta charset="UTF-8" /><title>Gemini 2.5 Flash-Lite</title></head>
<body style="font-family:sans-serif;padding:2rem;max-width:700px;">
<h1>Gemini 2.5 Flash-Lite (Text + Image)</h1>
<form id="genai-form">
<input type="text" id="userId" placeholder="User ID / Token" style="width:300px;" /><br/><br/>
<textarea id="prompt" rows="5" cols="60" placeholder="Enter prompt..."></textarea><br/><br/>
<input type="file" id="imageInput" accept="image/*"/><br/><br/>
<button type="submit">Generate</button>
</form>
<pre id="output" style="background:#f4f4f4;padding:1rem;margin-top:1rem;white-space:pre-wrap;"></pre>

<script>
const form = document.getElementById('genai-form');
const out = document.getElementById('output');

form.addEventListener('submit', async e => {
e.preventDefault();
const prompt = document.getElementById('prompt').value.trim();
const uid = document.getElementById('userId').value.trim();
const fileInput = document.getElementById('imageInput');

if (!uid) { out.textContent = 'Please enter a user ID/token.'; return; }
if (!prompt && fileInput.files.length === 0) { out.textContent = 'Enter text or attach image.'; return; }

out.textContent = 'Generating…';
const formData = new FormData();
formData.append("text", prompt);
formData.append("user_id", uid);
if (fileInput.files.length > 0) formData.append("image", fileInput.files[0]);

try {
const resp = await fetch('/generate', { method: 'POST', body: formData });
const data = await resp.json();
if (data.error) out.textContent = 'Error: ' + data.error;
else out.textContent =
"🕒 Total Time: " + data.timing.total_ms + " ms\\n" +
"⚙️ Model Time: " + data.timing.model_ms + " ms\\n\\n" +
"📜 Result:\\n" + data.result;
} catch (err) {
out.textContent = 'Fetch error: ' + err.message;
}
});
</script>
</body></html>
"""
|
| 88 |
|
| 89 |
+
# --- Gemini Generation ---
def generate_from_gemini(prompt, image_bytes=None, mime_type="image/jpeg"):
    """Call Gemini 2.5 Flash-Lite with optional text and image input.

    Args:
        prompt: User text; skipped when empty.
        image_bytes: Optional raw image payload.
        mime_type: MIME type of ``image_bytes``. Defaults to "image/jpeg"
            for backward compatibility, but the upload form accepts any
            image/*, so callers should pass the upload's real content type.

    Returns:
        dict with "text" (model output) and "timing"
        ({"total_ms", "model_ms"}, both integer milliseconds).
    """
    start_time = time.time()

    parts = []
    if prompt:
        parts.append(types.Part.from_text(text=prompt))
    if image_bytes:
        # Fix: previously every upload was labeled image/jpeg regardless of
        # its actual format; the type is now caller-controlled.
        parts.append(types.Part.from_bytes(data=image_bytes, mime_type=mime_type))

    contents = [types.Content(role="user", parts=parts)]
    cfg = types.GenerateContentConfig(response_mime_type="text/plain")

    # Time the model call separately from total function time.
    model_start = time.time()
    res = client.models.generate_content(
        model="gemini-2.5-flash-lite", contents=contents, config=cfg
    )
    model_end = time.time()

    return {
        "text": res.text,
        "timing": {
            "total_ms": int((time.time() - start_time) * 1000),
            "model_ms": int((model_end - model_start) * 1000),
        },
    }
|
| 107 |
+
|
| 108 |
+
# --- History Management ---
def get_user_history(uid):
    """Return the cached history list for *uid*, loading it lazily.

    On first access the history is fetched from the Lambda endpoint; any
    failure (network error, bad status, bad JSON) degrades to an empty
    history so generation can still proceed.
    """
    if uid in user_memory:
        return user_memory[uid]["history"]
    try:
        resp = requests.get(f"{LAMBDA_URL}?user_id={uid}", timeout=5)
        resp.raise_for_status()
        loaded = resp.json().get("history", [])
        user_memory[uid] = {"history": loaded, "last_sync": 0}
        app.logger.info(f"Loaded history for {uid}")
    except Exception as e:
        # Degrade to a fresh, empty history on any load failure.
        app.logger.warning(f"Failed to load history for {uid}: {e}")
        user_memory[uid] = {"history": [], "last_sync": 0}
    return user_memory[uid]["history"]
|
| 120 |
+
|
| 121 |
+
def update_user_history(uid, prompt, response):
    """Append one prompt/response exchange to *uid*'s in-memory history."""
    record = user_memory.setdefault(uid, {"history": [], "last_sync": 0})
    record["history"].append(
        {"prompt": prompt, "response": response, "timestamp": time.time()}
    )
|
| 124 |
+
|
| 125 |
+
# --- Routes ---
@app.route("/")
def index():
    """Serve the single-page frontend."""
    page = render_template_string(HTML)
    return page
|
| 129 |
|
| 130 |
+
@app.route("/generate", methods=["POST"])
def gen():
    """Handle a generation request: validate input, call Gemini, record history.

    Expects multipart form fields: user_id (required), text (optional),
    image (optional file). Returns JSON with result + timing, or an error.
    """
    uid = request.form.get("user_id", "").strip()
    if not uid:
        return jsonify({"error": "Missing user ID/token"}), 400

    prompt = request.form.get("text", "")
    upload = request.files.get("image")
    img_bytes = upload.read() if upload else None
    if not prompt and not img_bytes:
        return jsonify({"error": "No prompt or image provided"}), 400

    try:
        # Warm the per-user cache so the background flush thread can sync it.
        # NOTE(review): the loaded history is not passed to the model call —
        # confirm whether conversational context injection is intended.
        _ = get_user_history(uid)
        result = generate_from_gemini(prompt, img_bytes)
        update_user_history(uid, prompt, result["text"])
        return jsonify({"result": result["text"], "timing": result["timing"]})
    except Exception as e:
        app.logger.exception("Generation failed")
        return jsonify({"error": str(e)}), 500
|
| 150 |
|
| 151 |
|
| 152 |
if __name__ == "__main__":
    # Hugging Face Spaces convention: listen on $PORT (default 7860),
    # bound to all interfaces so the container proxy can reach it.
    app.run(host="0.0.0.0", port=int(os.getenv("PORT", 7860)))
|