Update app.py
app.py CHANGED
@@ -1,6 +1,7 @@
 from flask import Flask, request, jsonify, send_from_directory
 from flask_limiter import Limiter
 from flask_limiter.util import get_remote_address
+from google.generativeai import configure, GenerativeModel
 from PIL import Image
 from io import BytesIO
 from prodiapy import Prodia
@@ -254,45 +255,53 @@ async def generate_image():
         return jsonify({"status": "error", "error": "Internal Server Error"}), 500
 
 
-
-
-
-
-
+configure(api_key="AIzaSyCKc8gUTrb7KjhAwY6NwATaMWIDr5etKFk")
+
+# Set up the model
+generation_config = {
+    "temperature": 0.9,
+    "top_p": 1,
+    "top_k": 1,
+    "max_output_tokens": 2048,
 }
 
+safety_settings = [
+    {
+        "category": "HARM_CATEGORY_HARASSMENT",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+    },
+    {
+        "category": "HARM_CATEGORY_HATE_SPEECH",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+    },
+    {
+        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+    },
+    {
+        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+    },
+]
+
+model = GenerativeModel(
+    model_name="gemini-1.0-pro-001",
+    generation_config=generation_config,
+    safety_settings=safety_settings
+)
+
 @app.route('/gemini', methods=['GET'])
 @limiter.limit("30 per minute")
 def gemini():
     prompt = request.args.get('prompt')
-    model = request.args.get('model', 'gemini-pro')
 
     if not prompt:
         return jsonify({'error': 'Prompt parameter is required'}), 400
 
-
-
-
-
-    data = {
-        "contents": [
-            {
-                "parts": [
-                    {
-                        "text": prompt
-                    }
-                ]
-            }
-        ]
-    }
-
-    response = requests.post(url + "?key=AIzaSyCKc8gUTrb7KjhAwY6NwATaMWIDr5etKFk", json=data)
-
-    if response.status_code == 200:
-        result = response.json()
-        return jsonify(result)
-    else:
-        return jsonify({'error': 'Failed to generate content'}), 500
+    convo = model.start_chat(history=[prompt])
+    response = convo.last.text
+
+    return jsonify({'response': response})
 
 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860, debug=True)
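The new route seeds the chat with start_chat(history=[prompt]) and then reads convo.last.text, but in the google-generativeai SDK start_chat() only records prior turns; chat.last is not populated until send_message() has returned a response, so the prompt still has to be sent explicitly. Below is a minimal sketch of that flow, assuming the key is read from a GOOGLE_API_KEY environment variable instead of the literal in app.py (the environment variable name is an assumption; the model name and generation settings mirror the diff):

import os
from google.generativeai import configure, GenerativeModel

# Assumption: the API key is supplied via an environment variable, not hardcoded.
configure(api_key=os.environ["GOOGLE_API_KEY"])

model = GenerativeModel(
    model_name="gemini-1.0-pro-001",
    generation_config={"temperature": 0.9, "top_p": 1, "top_k": 1, "max_output_tokens": 2048},
)

# start_chat() only seeds earlier turns; send_message() is what actually submits the prompt.
convo = model.start_chat(history=[])
reply = convo.send_message("Why is the sky blue?")
print(reply.text)  # convo.last now refers to this same response

send_message() appends both the user turn and the model reply to convo.history, so convo.last (and .text on it) is only meaningful after at least one message has been sent.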
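As a usage example, the new endpoint can be exercised with a plain GET request. The local host name here is an assumption; the port comes from the app.run() call in the diff, and the route is limited to 30 requests per minute by the @limiter.limit decorator:

import requests

# Assumption: the Flask app from the diff is running locally on port 7860.
resp = requests.get(
    "http://localhost:7860/gemini",
    params={"prompt": "Why is the sky blue?"},
)
print(resp.status_code)  # 400 when the prompt parameter is missing
print(resp.json())       # {"response": "..."} on success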