Spaces:
Sleeping
Sleeping
Update main.py
Browse files
main.py
CHANGED
|
@@ -1,46 +1,46 @@
|
|
| 1 |
import os
|
| 2 |
import google.generativeai as genai
|
| 3 |
-
from fastapi import FastAPI, HTTPException
|
| 4 |
from pydantic import BaseModel
|
| 5 |
-
import traceback # Добавили для отладки
|
| 6 |
|
| 7 |
app = FastAPI()
|
| 8 |
|
| 9 |
-
#
|
| 10 |
-
api_key
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
else:
|
| 15 |
-
print(f"Ключ найден, первые символы: {api_key[:5]}...")
|
| 16 |
-
genai.configure(api_key=api_key)
|
| 17 |
-
|
| 18 |
-
# Попробуем gemini-pro, она доступна всем
|
| 19 |
-
model = genai.GenerativeModel("gemini-1.5-flash")
|
| 20 |
|
| 21 |
class RequestData(BaseModel):
|
| 22 |
context: str
|
| 23 |
-
mood: str
|
| 24 |
|
| 25 |
@app.post("/complete")
|
| 26 |
async def complete_text(data: RequestData):
|
| 27 |
try:
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
elif data.mood == "serious":
|
| 33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
response = model.generate_content(prompt)
|
| 38 |
-
print(f"Ответ от Gemini получен: {response.text[:20]}...")
|
| 39 |
-
|
| 40 |
return {"completion": response.text.strip()}
|
| 41 |
except Exception as e:
|
| 42 |
-
#
|
| 43 |
-
error_msg = traceback.format_exc()
|
| 44 |
-
print("!!! КРИТИЧЕСКАЯ ОШИБКА !!!")
|
| 45 |
-
print(error_msg)
|
| 46 |
raise HTTPException(status_code=500, detail=str(e))
|
|
|
|
| 1 |
import os
|
| 2 |
import google.generativeai as genai
|
| 3 |
+
from fastapi import FastAPI, HTTPException
|
| 4 |
from pydantic import BaseModel
|
|
|
|
| 5 |
|
| 6 |
app = FastAPI()

# Pull the Gemini API key from the environment; None if the variable is unset,
# in which case the first model call will fail with an auth error.
_gemini_api_key = os.environ.get("GEMINI_API_KEY")
genai.configure(api_key=_gemini_api_key)

# NOTE: requirements.txt must pin google-generativeai >= 0.7.2,
# otherwise the "flash" model is not available.
model = genai.GenerativeModel("gemini-1.5-flash")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
class RequestData(BaseModel):
    """Request payload for the POST /complete endpoint."""

    # Text the user has typed so far (possibly typed in the wrong keyboard layout).
    context: str
    # Desired tone: "fun", "serious", or "angry"; any other value falls back to neutral.
    mood: str
|
| 17 |
|
| 18 |
@app.post("/complete")
|
| 19 |
async def complete_text(data: RequestData):
|
| 20 |
try:
|
| 21 |
+
# Настройка личности
|
| 22 |
+
tone_instruction = ""
|
| 23 |
+
if data.mood == "fun":
|
| 24 |
+
tone_instruction = "Отвечай весело, используй сленг, эмодзи."
|
| 25 |
+
elif data.mood == "serious":
|
| 26 |
+
tone_instruction = "Отвечай официально, сухо, деловой стиль."
|
| 27 |
+
elif data.mood == "angry":
|
| 28 |
+
tone_instruction = "Отвечай пассивно-агрессивно, с сарказмом."
|
| 29 |
+
else:
|
| 30 |
+
tone_instruction = "Отвечай спокойно и нейтрально."
|
| 31 |
|
| 32 |
+
# ТВОЙ НОВЫЙ ПРОМПТ
|
| 33 |
+
prompt = (
|
| 34 |
+
f"System: {tone_instruction}. "
|
| 35 |
+
f"User input might be in wrong keyboard layout (e.g. 'ghbdtn' instead of 'привет'). "
|
| 36 |
+
f"If input looks like encoded Russian, decode it internally and complete the Russian phrase. "
|
| 37 |
+
f"If input is English, just complete it. "
|
| 38 |
+
f"Do not repeat the input. Output ONLY the completion text.\n\n"
|
| 39 |
+
f"User Input: {data.context}\nOutput:"
|
| 40 |
+
)
|
| 41 |
|
| 42 |
response = model.generate_content(prompt)
|
|
|
|
|
|
|
| 43 |
return {"completion": response.text.strip()}
|
| 44 |
except Exception as e:
|
| 45 |
+
# Вывод ошибки в ответ, чтобы клиент увидел причину
|
|
|
|
|
|
|
|
|
|
| 46 |
raise HTTPException(status_code=500, detail=str(e))
|