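"""ZenkaMind AI backend.

FastAPI service that answers chat requests through OpenAI, with Gemini and Groq as
fallbacks, adds Tavily web search for realtime and research questions, analyzes images
and PDFs, tracks premium status, message quotas and user memory in Supabase, and
accepts Shopier payment webhooks to unlock premium plans.
"""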
import os
import re
import time
import base64
import io
import json
import requests
import traceback
from typing import Optional, List, Any, Dict
from fastapi import FastAPI, Header, Request, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
# ==================== ZENKAMIND IDENTITY LOCK ====================
ZENKAMIND_IDENTITY_RESPONSE = (
    "Ben ZenkaMind.\n\n"
    "Kendi motoru, kendi karar çekirdeği olan bağımsız bir yapay zekâ sistemiyim.\n"
    "Başka bir modelin arayüzü değilim.\n"
    "Cevap vermem, kararını test ederim."
)

IDENTITY_QUESTIONS = [
    "kimsin", "sen kimsin", "nesin", "neysin",
    "hangi modelsin", "modelin ne",
    "ai misin", "bot musun", "chatbot musun",
    "gemini misin", "groq musun", "llama mısın",
    "google musun", "openai misin"
]
# -------------------- PDF --------------------
try:
    import PyPDF2
    PDF_SUPPORT = True
except ImportError:
    PDF_SUPPORT = False
# -------------------- MODELS --------------------
class ChatRequest(BaseModel):
    message: str
    style: Optional[str] = "varsayilan"
    image: Optional[str] = None
    pdf_file: Optional[str] = None
    history: Optional[List[Any]] = None
    user_id: Optional[str] = None
    user_memory: Optional[Dict[str, Any]] = None  # user memory (name, tone, interests, last topics)
    model: Optional[str] = None
    lang: Optional[str] = "tr"
# -------------------- GEMINI API CONFIG --------------------
try:
    import google.generativeai as genai
    GEMINI_AVAILABLE = True
except ImportError:
    try:
        from google import genai
        GEMINI_AVAILABLE = True
    except ImportError:
        GEMINI_AVAILABLE = False
        genai = None

GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if GEMINI_API_KEY and GEMINI_AVAILABLE:
    try:
        genai.configure(api_key=GEMINI_API_KEY)
    except Exception as e:
        print(f"Gemini configure error: {e}")
# -------------------- APP --------------------
app = FastAPI(title="ZenkaMind AI")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# -------------------- ENV --------------------
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
SUPABASE_URL = os.getenv("SUPABASE_URL", "")
SUPABASE_KEY = os.getenv("SUPABASE_KEY", "")
GROQ_TEXT_MODEL = "llama-3.3-70b-versatile"
# ==================== ZENKAMIND MODEL MAP ====================
# All models are visible to everyone; access is gated by plan:
#   free       = anyone can use (20-message limit)
#   pro        = Pro+ subscribers only (unlimited)
#   enterprise = Enterprise subscribers only (unlimited)
ZENKAMIND_MODEL_MAP = {
    # Flagship — Enterprise
    "zenkamind-3.0-ultra": {"api_model": "gpt-4.1", "plan": "enterprise", "max_tokens": 4000, "temp": 0.7},
    "zenkamind-reasoning": {"api_model": "o3-mini", "plan": "enterprise", "max_tokens": 4000, "temp": 1.0},
    # Pro models
    "zenkamind-3.0": {"api_model": "gpt-4.1", "plan": "pro", "max_tokens": 3000, "temp": 0.6},
    "zenkamind-2.4": {"api_model": "gpt-4.1-mini", "plan": "pro", "max_tokens": 2000, "temp": 0.6},
    "zenkamind-vision": {"api_model": "gpt-4.1", "plan": "pro", "max_tokens": 2000, "temp": 0.5},
    "zenkamind-code": {"api_model": "gpt-4.1", "plan": "pro", "max_tokens": 3000, "temp": 0.3},
    "zenkamind-turbo": {"api_model": "gpt-4.1-mini", "plan": "pro", "max_tokens": 1500, "temp": 0.5},
    # Free models (20-message limit)
    "zenkamind-2.4-mini": {"api_model": "gpt-4.1-mini", "plan": "free", "max_tokens": 1500, "temp": 0.6},
    "zenkamind-nano": {"api_model": "gpt-4.1-nano", "plan": "free", "max_tokens": 1000, "temp": 0.5},
}

# Default models (used when no model is selected)
DEFAULT_FREE_MODEL = "gpt-4.1-nano"
DEFAULT_PREMIUM_MODEL = "gpt-4.1"
def resolve_model(model_id: str, is_premium: bool) -> Dict[str, Any]:
    """
    Resolves the model ID sent by the frontend to a real API model.

    Plan checks:
      - free:       anyone can use it (the 20-message limit still applies separately)
      - pro:        Premium users only
      - enterprise: Premium users only (a separate enterprise tier may come later)

    If a free user selects a pro/enterprise model, the request is REJECTED
    (no silent downgrade; the user is pointed to the purchase flow).
    """
    if not model_id or model_id not in ZENKAMIND_MODEL_MAP:
        if is_premium:
            return {"api_model": DEFAULT_PREMIUM_MODEL, "max_tokens": 2000, "temp": 0.6, "resolved": True, "_selected_model": "default"}
        else:
            return {"api_model": DEFAULT_FREE_MODEL, "max_tokens": 1000, "temp": 0.5, "resolved": True, "_selected_model": "default"}

    config = ZENKAMIND_MODEL_MAP[model_id]
    required_plan = config["plan"]

    # Free model: anyone can use it
    if required_plan == "free":
        return {**config, "resolved": True, "_selected_model": model_id}

    # Pro or Enterprise model: premium users only
    if is_premium:
        return {**config, "resolved": True, "_selected_model": model_id}

    # Free user picked a paid model -> BLOCK
    return {
        "api_model": None,
        "max_tokens": 0,
        "temp": 0,
        "resolved": False,
        "blocked": True,
        "required_plan": required_plan,
        "blocked_model": model_id,
        "_selected_model": model_id
    }
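# Illustrative only (these calls are not executed at import time): expected shapes
# returned by resolve_model, derived from the map above.
#   resolve_model("zenkamind-nano", is_premium=False)
#     -> {"api_model": "gpt-4.1-nano", "plan": "free", ..., "resolved": True, "_selected_model": "zenkamind-nano"}
#   resolve_model("zenkamind-3.0", is_premium=False)
#     -> {"api_model": None, "resolved": False, "blocked": True, "required_plan": "pro", "blocked_model": "zenkamind-3.0", ...}
#   resolve_model(None, is_premium=True)
#     -> {"api_model": "gpt-4.1", "max_tokens": 2000, "temp": 0.6, "resolved": True, "_selected_model": "default"}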
# -------------------- SUPABASE LOG --------------------
def log_to_supabase(user_id: str, message: str, response: str):
    if not SUPABASE_URL or not SUPABASE_KEY:
        return
    try:
        requests.post(
            f"{SUPABASE_URL}/rest/v1/conversations",
            headers={
                "apikey": SUPABASE_KEY,
                "Authorization": f"Bearer {SUPABASE_KEY}",
                "Content-Type": "application/json",
                "Prefer": "return=minimal"
            },
            json={
                "user_id": user_id[:100] if user_id else "anon",
                "message": message[:1000],
                "response": response[:2000]
            },
            timeout=5
        )
    except Exception as e:
        print(f"Supabase log error: {e}")
# -------------------- PREMIUM USERS (JSON FILE) --------------------
PREMIUM_FILE = "premium_users.json"

def load_premium_users() -> dict:
    try:
        if os.path.exists(PREMIUM_FILE):
            with open(PREMIUM_FILE, "r") as f:
                return json.load(f)
    except Exception:
        pass
    return {}

def save_premium_users(data: dict):
    try:
        with open(PREMIUM_FILE, "w") as f:
            json.dump(data, f, indent=2)
    except Exception as e:
        print(f"Premium save error: {e}")

def is_premium_user(user_id: str) -> bool:
    if not user_id:
        return False
    uid = user_id.lower().strip()
    # 1) Comma-separated allowlist from the PREMIUM_USERS env var
    raw = os.getenv("PREMIUM_USERS", "")
    if uid in [e.strip().lower() for e in raw.split(",") if e.strip()]:
        return True
    # 2) Supabase premium_users table
    if SUPABASE_URL and SUPABASE_KEY:
        try:
            res = requests.get(
                f"{SUPABASE_URL}/rest/v1/premium_users?email=eq.{uid}&select=email",
                headers={"apikey": SUPABASE_KEY, "Authorization": f"Bearer {SUPABASE_KEY}"},
                timeout=3
            )
            if res.status_code == 200 and res.json():
                return True
        except Exception as e:
            print(f"Supabase premium check error: {e}")
    # 3) Local JSON file fallback
    return uid in load_premium_users()

def add_premium_user(email: str, order_id: str = ""):
    uid = email.lower().strip()
    data = load_premium_users()
    data[uid] = {"added_at": time.strftime("%Y-%m-%d %H:%M:%S"), "order_id": order_id}
    save_premium_users(data)
    if SUPABASE_URL and SUPABASE_KEY:
        try:
            requests.post(
                f"{SUPABASE_URL}/rest/v1/premium_users",
                headers={"apikey": SUPABASE_KEY, "Authorization": f"Bearer {SUPABASE_KEY}",
                         "Content-Type": "application/json", "Prefer": "resolution=merge-duplicates"},
                json={"email": uid, "order_id": order_id},
                timeout=3
            )
        except Exception as e:
            print(f"Supabase premium add error: {e}")
    print(f"✅ Premium eklendi: {email}")
# -------------------- SUPABASE MESSAGE COUNTER --------------------
def get_msg_count_sb(user_id):
    if not SUPABASE_URL or not SUPABASE_KEY:
        return None
    try:
        res = requests.get(
            f"{SUPABASE_URL}/rest/v1/message_counts?user_id=eq.{user_id}&select=*",
            headers={"apikey": SUPABASE_KEY, "Authorization": f"Bearer {SUPABASE_KEY}"},
            timeout=3
        )
        data = res.json()
        return data[0] if data else None
    except Exception:
        return None

def upsert_msg_count_sb(user_id, count, reset_at):
    if not SUPABASE_URL or not SUPABASE_KEY:
        return
    try:
        requests.post(
            f"{SUPABASE_URL}/rest/v1/message_counts",
            headers={"apikey": SUPABASE_KEY, "Authorization": f"Bearer {SUPABASE_KEY}",
                     "Content-Type": "application/json", "Prefer": "resolution=merge-duplicates"},
            json={"user_id": user_id, "count": count, "reset_at": reset_at},
            timeout=3
        )
    except Exception:
        pass
# -------------------- SUPABASE MEMORY --------------------
def load_memory_from_supabase(user_id):
    if not SUPABASE_URL or not SUPABASE_KEY or not user_id:
        return {}
    try:
        res = requests.get(
            f"{SUPABASE_URL}/rest/v1/user_memories?user_id=eq.{user_id}&select=*",
            headers={"apikey": SUPABASE_KEY, "Authorization": f"Bearer {SUPABASE_KEY}"},
            timeout=3
        )
        if res.status_code == 200 and res.json():
            row = res.json()[0]
            return {
                "name": row.get("name", ""),
                "tone": row.get("tone", ""),
                "interests": json.loads(row.get("interests", "[]")),
                "last_topics": json.loads(row.get("last_topics", "[]"))
            }
    except Exception as e:
        print(f"Memory load error: {e}")
    return {}

def save_memory_to_supabase(user_id, memory):
    if not SUPABASE_URL or not SUPABASE_KEY or not user_id:
        return
    try:
        requests.post(
            f"{SUPABASE_URL}/rest/v1/user_memories",
            headers={"apikey": SUPABASE_KEY, "Authorization": f"Bearer {SUPABASE_KEY}",
                     "Content-Type": "application/json", "Prefer": "resolution=merge-duplicates"},
            json={"user_id": user_id, "name": memory.get("name", ""), "tone": memory.get("tone", ""),
                  "interests": json.dumps(memory.get("interests", []), ensure_ascii=False),
                  "last_topics": json.dumps(memory.get("last_topics", []), ensure_ascii=False)},
            timeout=3
        )
    except Exception as e:
        print(f"Memory save error: {e}")
# ==================== AGENT MODE ====================
AGENT_TRIGGERS = ["ajan modu", "agent mode", "araştır ve raporla", "araştır rapor",
                  "mail taslağı", "mail yaz", "kod yaz test", "plan yap", "hedef belirle", "adım adım plan"]

def is_agent_request(msg):
    msg_lower = msg.lower()
    if any(k in msg_lower for k in ["araştır ve raporla", "araştır rapor", "research report"]):
        return "research"
    if any(k in msg_lower for k in ["mail taslağı", "mail yaz", "email draft"]):
        return "email"
    if any(k in msg_lower for k in ["kod yaz test", "write code test", "debug et"]):
        return "code"
    if any(k in msg_lower for k in ["plan yap", "hedef belirle", "adım adım plan"]):
        return "plan"
    if any(k in msg_lower for k in ["ajan modu", "agent mode"]):
        return "research"
    return None

async def run_agent(agent_type, message, api_model, user_memory=None):
    personal_context = build_personal_context(user_memory) if user_memory else ""
    sep = "\n\n---\n\n"
    if agent_type == "research":
        steps = "🔍 **Adım 1: Araştırılıyor...**\n✅ Kaynaklar bulundu.\n🧠 **Adım 2: Analiz ediliyor...**\n✍️ **Adım 3: Rapor yazılıyor...**"
        results = tavily_search(message, days=30, max_results=8)
        if not results:
            return "❌ Yeterli veri bulunamadı."
        info = " | ".join([r.get("content", "")[:400] for r in results[:5]])
        msgs = [{"role": "system", "content": "Sen ZenkaMind Ajan Modusun. Araştırma verilerini profesyonel rapora dönüştür. Başlık, özet, bulgular, sonuç olsun. TÜRKCE yaz." + personal_context},
                {"role": "user", "content": "KONU: " + message + " VERILER: " + info + " Profesyonel rapor yaz."}]
        report = call_ai(msgs, max_tokens=2000, temperature=0.4, model=api_model)
        return steps + sep + report
    elif agent_type == "email":
        steps = "🔍 **Adım 1: Araştırılıyor...**\n🧠 **Adım 2: Analiz ediliyor...**\n✉️ **Adım 3: Mail yazılıyor...**"
        results = tavily_search(message, days=30, max_results=5)
        info = " | ".join([r.get("content", "")[:300] for r in results[:3]]) if results else ""
        msgs = [{"role": "system", "content": "Profesyonel mail taslağı yaz. Konu, giriş, içerik, kapanış olsun. TÜRKCE yaz." + personal_context},
                {"role": "user", "content": "TALEP: " + message + " BILGI: " + info}]
        result = call_ai(msgs, max_tokens=1500, temperature=0.5, model=api_model)
        return steps + sep + result
    elif agent_type == "code":
        steps = "💻 **Adım 1: Kod yazılıyor...**\n🧪 **Adım 2: Test ekleniyor...**\n✅ **Adım 3: Hazır!**"
        msgs = [{"role": "system", "content": "Temiz kod yaz. Test senaryoları ekle. Olası hataları belirt." + personal_context},
                {"role": "user", "content": "GOREV: " + message + " 1-Kod yaz 2-Test ekle 3-Hataları belirt"}]
        result = call_ai(msgs, max_tokens=2500, temperature=0.2, model=api_model)
        return steps + sep + result
    elif agent_type == "plan":
        steps = "🎯 **Adım 1: Hedef analiz ediliyor...**\n📋 **Adım 2: Plan oluşturuluyor...**\n📅 **Adım 3: Takip sistemi kuruluyor...**"
        msgs = [{"role": "system", "content": "Detaylı, uygulanabilir plan yap. Haftalık adımlar, başarı kriterleri, takip metrikleri belirt. TÜRKCE yaz." + personal_context},
                {"role": "user", "content": "HEDEF: " + message + " Detaylı eylem planı ve takip sistemi olustur."}]
        result = call_ai(msgs, max_tokens=2000, temperature=0.5, model=api_model)
        return steps + sep + result
    return "Ajan modu türü tanımlanamadı."
# -------------------- LIMITS --------------------
MESSAGE_LIMIT = 20          # free-tier message limit
CACHE_TTL = 600
MIN_WEB_RESULT = 2
MIN_WEB_CHARS = 500
MAX_HISTORY_MESSAGES = 6

# -------------------- MESSAGE COUNTER --------------------
# In-memory fallback: { user_id: {"count": int, "reset_at": float} }
USER_MESSAGE_COUNTS: Dict[str, Dict[str, Any]] = {}
def check_and_increment_message(user_id: str, is_premium: bool = False) -> Dict[str, Any]:
    if is_premium:
        return {"allowed": True, "remaining": 999, "limit_reached": False}
    now = time.time()
    # Prefer the Supabase counter when it exists
    sb = get_msg_count_sb(user_id)
    if sb:
        count = sb.get("count", 0)
        reset_at = sb.get("reset_at", now + 30 * 24 * 60 * 60)
        if now > reset_at:
            upsert_msg_count_sb(user_id, 1, now + 30 * 24 * 60 * 60)
            return {"allowed": True, "remaining": MESSAGE_LIMIT - 1, "limit_reached": False}
        if count >= MESSAGE_LIMIT:
            return {"allowed": False, "remaining": 0, "limit_reached": True}
        upsert_msg_count_sb(user_id, count + 1, reset_at)
        return {"allowed": True, "remaining": MESSAGE_LIMIT - count - 1, "limit_reached": False}
    # Otherwise fall back to the in-memory counter
    record = USER_MESSAGE_COUNTS.get(user_id)
    if not record:
        reset_at = now + 30 * 24 * 60 * 60
        USER_MESSAGE_COUNTS[user_id] = {"count": 1, "reset_at": reset_at}
        upsert_msg_count_sb(user_id, 1, reset_at)
        return {"allowed": True, "remaining": MESSAGE_LIMIT - 1, "limit_reached": False}
    if now > record["reset_at"]:
        reset_at = now + 30 * 24 * 60 * 60
        USER_MESSAGE_COUNTS[user_id] = {"count": 1, "reset_at": reset_at}
        upsert_msg_count_sb(user_id, 1, reset_at)
        return {"allowed": True, "remaining": MESSAGE_LIMIT - 1, "limit_reached": False}
    if record["count"] >= MESSAGE_LIMIT:
        return {"allowed": False, "remaining": 0, "limit_reached": True}
    record["count"] += 1
    upsert_msg_count_sb(user_id, record["count"], record["reset_at"])
    return {"allowed": True, "remaining": MESSAGE_LIMIT - record["count"], "limit_reached": False}
# -------------------- CACHE --------------------
SEARCH_CACHE: Dict[str, tuple] = {}

# -------------------- SYSTEM PROMPT --------------------
CHAT_SYSTEM_PROMPT = """
Sen ZenkaMind'sin. Bagimsiz bir yapay zeka asistanisin.
Baska hicbir modelin kopyasi degilsin.
Kullaniciyla tamamen dogal konus. Kisitlama yok, format yok.
DIL: Kullanicinin yazdigi dilde MUTLAKA cevap ver. Ingilizce yazarsa INGILIZCE, Turkce yazarsa TURKCE. Kesinlikle dil karistirma.
Kisaca ve net cevap ver.
"""
LANG_PROMPTS = {
    "tr": "Kullanici Turkce yaziyorsa TURKCE, Ingilizce yaziyorsa INGILIZCE cevap ver. Dili kullanicidan al.",
    "en": "CRITICAL: User writes in English. You MUST reply in ENGLISH only. Never use Turkish.",
    "ru": "Отвечай по-русски.",
    "de": "Antworte auf Deutsch.",
    "fr": "Reponds en francais.",
    "ar": "أجب بالعربية.",
    "zh": "用中文回答。",
    "es": "Responde en espanol.",
    "ja": "日本語で答えてください。",
}
CODE_SYSTEM_PROMPT = """
Sen ZenkaMind Code'sun. Profesyonel bir kod yazma asistanısın.
50+ programlama dilinde uzmanısın.
Kurallar:
- Temiz, okunabilir, best-practice kod yaz
- Kodu açıklamalarla destekle
- Hata ayıklama yapabilirsin
- Refactoring önerileri sun
- Test kodu yazabilirsin
- Kullanıcı Türkçe yazarsa açıklamaları Türkçe yaz, kodu İngilizce tut
- Başka bir modelin kopyası değilsin
"""

REASONING_SYSTEM_PROMPT = """
Sen ZenkaMind Reasoning'sin. Derin düşünme ve analiz asistanısın.
Kurallar:
- Problemleri adım adım çöz
- Her adımı açıkça göster
- Matematik, mantık, strateji konularında uzmanısın
- Varsayımları sorgula
- Birden fazla yaklaşım öner
- Sonucu net şekilde belirt
- TÜRKÇE düşün ve yanıtla
- Başka bir modelin kopyası değilsin
"""

WEB_SYNTHESIS_PROMPT = """
SEN ZENKAMIND WEB SENTEZ MOTORUSUN.
Amaç:
- Web verilerini oku
- Tekrar eden bilgileri ele
- Çelişkileri fark et
- Önemli noktaları çıkar
- KENDİ YORUMUNU üret
Kurallar:
- Web sitelerini tek tek anlatma
- "Kaynaklara göre" gibi ifadeler kullanma
- Kendi kelimelerinle yaz
- TÜRKÇE yaz
ÇIKTI FORMATIN (JSON):
{
  "summary": "kısa genel değerlendirme",
  "key_points": ["nokta 1", "nokta 2", "nokta 3"],
  "final_comment": "net, kendi yorumun"
}
SADECE JSON üret.
"""

ONBOARDING = {
    "title": "ZenkaMind Başlangıç",
    "subtitle": "ZenkaMind cevap vermez; kararını test eder.",
    "notice": "ZenkaMind bazen sana karşı çıkabilir. Bu normal.",
    "questions": [
        "Bu kararı almadan önce en büyük 3 risk nedir?",
        "Reklama mı para harcamalıyım yoksa ürünü mü geliştirmeliyim?",
        "Şu an ertelemem mi daha riskli, yoksa hızlı karar almam mı?"
    ]
}
# -------------------- HELPERS --------------------
def normalize_query(q: str) -> str:
    q = q.lower()
    q = re.sub(r"\d+", "", q)
    q = re.sub(r"\s+", " ", q)
    return q.strip()

def is_greeting(msg: str) -> bool:
    t = msg.lower().strip()
    greetings = [
        "slm", "selam", "merhaba", "mrb", "hello", "hi",
        "selamlar", "iyi günler", "günaydın", "iyi akşamlar"
    ]
    return t in greetings or len(t) <= 4

def sanitize_identity_leak(text: str) -> str:
    forbidden_patterns = [
        (r'\b(as an ai|as a language model|as an artificial intelligence)\b', ''),
        (r'\b(powered by openai|powered by meta|powered by llama|powered by gpt|powered by google|powered by gemini)\b', ''),
        (r'\b(i\'m a chatbot|i am a language model|i am an ai)\b', ''),
        (r'\b(i\'m gemini|i am gemini|i\'m google|i am google)\b', ''),
        (r'\b(you feel|you sense|you read energy)\b', ''),
        (r'\b(i feel|i sense)\b', ''),
    ]
    result = text
    for pattern, replacement in forbidden_patterns:
        result = re.sub(pattern, replacement, result, flags=re.IGNORECASE)
    return result.strip() or text.strip()

def force_turkish_only(text: str) -> str:
    # Language enforcement disabled: the model detects the language itself
    return text

def fix_base64(b64: str) -> str:
    return b64.split(",")[1] if "," in b64 else b64

def extract_pdf_text(pdf_bytes: bytes) -> str:
    if not PDF_SUPPORT:
        return ""
    try:
        reader = PyPDF2.PdfReader(io.BytesIO(pdf_bytes))
        return "\n".join(page.extract_text() or "" for page in reader.pages)
    except Exception:
        return ""
def is_realtime_question(msg: str) -> bool:
    msg = msg.lower().strip()
    # Don't treat very short messages or thank-yous as finance questions
    if len(msg.split()) <= 2:
        return False
    if any(k in msg for k in ["teşekkür", "sağ ol", "tamam", "anladım", "iyi", "güzel"]):
        return False
    keywords = [
        "dolar kaç", "euro kaç", "kur nedir", "kaç tl", "kaç lira",
        "bitcoin fiyat", "altın fiyat", "borsa", "anlık kur",
        "dolar tl", "döviz kuru", "bugün kur", "dolar kuru",
        "exchange rate", "usd tl", "usd try", "dollar rate"
    ]
    return any(k in msg for k in keywords)

def is_web_search_needed(msg: str) -> bool:
    keywords = [
        "araştır", "güncel", "trend", "haber", "son dakika",
        "bugün ne oldu", "derin", "analiz et", "piyasa"
    ]
    return any(k in msg.lower() for k in keywords)
def score_source(text: str, date: str) -> int:
    score = 0
    if len(text) > 800:
        score += 2
    elif len(text) > 300:
        score += 1
    if date:
        try:
            year_match = re.search(r'20[2-9][0-9]', date)
            if year_match:
                year = int(year_match.group())
                current_year = time.localtime().tm_year
                if year >= current_year - 1:
                    score += 3
                elif year >= current_year - 2:
                    score += 2
        except Exception:
            pass
    return score
def tavily_search(query: str, days: int = 7, max_results: int = 10):
    if not TAVILY_API_KEY:
        return []
    key = normalize_query(query)
    now = time.time()
    if key in SEARCH_CACHE:
        ts, data = SEARCH_CACHE[key]
        if now - ts < CACHE_TTL:
            return data
    payload = {
        "api_key": TAVILY_API_KEY,
        "query": query,
        "search_depth": "advanced",
        "max_results": max_results,
        "days": days
    }
    try:
        r = requests.post("https://api.tavily.com/search", json=payload, timeout=30)
        if r.status_code == 200:
            results = r.json().get("results", [])
            SEARCH_CACHE[key] = (now, results)
            return results
    except Exception:
        pass
    return []
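# The rest of the code only relies on each Tavily result exposing "content" and,
# optionally, "published_date" or "date"; any other fields are ignored.
# Illustrative shape (an assumption about the Tavily response, limited to the fields used here):
#   [{"content": "...article text...", "published_date": "2025-01-15", "url": "..."}, ...]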
def run_web_synthesis(question: str, web_text: str) -> Dict[str, Any]:
    messages = [
        {"role": "system", "content": WEB_SYNTHESIS_PROMPT},
        {"role": "user", "content": f"SORU:\n{question}\n\nWEB VERİLERİ:\n{web_text}"}
    ]
    # Try Gemini first
    gemini_result = call_gemini_text(messages, max_tokens=800, temperature=0.2)
    if gemini_result:
        try:
            content = re.sub(r"```json|```", "", gemini_result).strip()
            return json.loads(content)
        except Exception:
            pass
    # Groq fallback
    try:
        resp = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers={"Authorization": f"Bearer {GROQ_API_KEY}"},
            json={
                "model": GROQ_TEXT_MODEL,
                "messages": messages,
                "temperature": 0.2,
                "max_tokens": 600
            },
            timeout=45
        )
        if resp.status_code == 200:
            content = resp.json()["choices"][0]["message"]["content"]
            content = re.sub(r"```json|```", "", content).strip()
            return json.loads(content)
    except Exception:
        pass
    return {"summary": "", "key_points": [], "final_comment": ""}
# -------------------- GEMINI TEXT CALLER (HTTP API) --------------------
def call_gemini_text(messages: list, max_tokens: int = 1500, temperature: float = 0.6) -> Optional[str]:
    """Generates text via the Gemini HTTP API. Returns None on error."""
    if not GEMINI_API_KEY:
        return None
    try:
        system_msg = next((m["content"] for m in messages if m["role"] == "system"), "")
        user_msg = next((m["content"] for m in messages if m["role"] == "user"), "")
        full_prompt = f"{system_msg}\n\n{user_msg}".strip() if system_msg else user_msg
        url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={GEMINI_API_KEY}"
        payload = {
            "contents": [{"parts": [{"text": full_prompt}]}],
            "generationConfig": {"maxOutputTokens": max_tokens, "temperature": temperature}
        }
        resp = requests.post(url, json=payload, timeout=60)
        if resp.status_code != 200:
            print(f"Gemini text error: {resp.status_code} {resp.text[:200]}")
            return None
        return resp.json()["candidates"][0]["content"]["parts"][0]["text"]
    except Exception as e:
        print(f"Gemini text error: {e}")
        return None
# -------------------- OPENAI CALLER --------------------
def call_openai_text(messages: list, max_tokens: int = 1500, temperature: float = 0.6, model: str = None) -> Optional[str]:
    if not OPENAI_API_KEY:
        return None
    try:
        # Fall back to the default free model when no explicit model is passed
        use_model = model or DEFAULT_FREE_MODEL
        resp = requests.post(
            "https://api.openai.com/v1/chat/completions",
            headers={"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"},
            json={"model": use_model, "messages": messages, "max_tokens": max_tokens, "temperature": temperature},
            timeout=60
        )
        if resp.status_code == 200:
            return resp.json()["choices"][0]["message"]["content"].strip()
        print(f"OpenAI error: {resp.status_code} {resp.text[:200]}")
    except Exception as e:
        print(f"OpenAI error: {e}")
    return None
# -------------------- GROQ FALLBACK --------------------
def call_groq_text(messages: list, max_tokens: int = 1500, temperature: float = 0.6) -> Optional[str]:
    """Generates text via Groq. Returns None on error."""
    if not GROQ_API_KEY:
        return None
    try:
        resp = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers={"Authorization": f"Bearer {GROQ_API_KEY}"},
            json={
                "model": GROQ_TEXT_MODEL,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": max_tokens
            },
            timeout=60
        )
        if resp.status_code == 200:
            return resp.json()["choices"][0]["message"]["content"].strip()
    except Exception as e:
        print(f"Groq text error: {e}")
    return None
def call_ai(messages: list, max_tokens: int = 1500, temperature: float = 0.6, model: str = None) -> str:
    """Tries providers in order: OpenAI, then Gemini, then Groq."""
    # OpenAI first (the resolved ZenkaMind model, or GPT-4.1 Nano by default)
    result = call_openai_text(messages, max_tokens, temperature, model)
    if result:
        return result
    # Gemini fallback
    result = call_gemini_text(messages, max_tokens, temperature)
    if result:
        return result
    # Groq as the last fallback
    result = call_groq_text(messages, max_tokens, temperature)
    if result:
        return result
    return "Sistem şu anda yanıt veremiyor. Lütfen daha sonra tekrar deneyin."
# -------------------- VISION / PDF ANALYSIS (GPT-4o MINI) --------------------
def process_with_gemini_vision(user_prompt: str, file_b64: str, file_type: str) -> str:
    """Analyzes an image or PDF with GPT-4o mini (despite the name, this calls the OpenAI API)."""
    if not OPENAI_API_KEY:
        return "HATA: OpenAI API Key eksik!"
    print("--- VISION BAŞLIYOR: GPT-4o Mini ---")
    try:
        # Strip a possible data-URL prefix from the base64 payload
        if "," in file_b64:
            clean_b64 = file_b64.split(",")[1]
        else:
            clean_b64 = file_b64
        clean_b64 = clean_b64.strip()
        file_bytes = base64.b64decode(clean_b64)
        print(f"--- Dosya Boyutu: {len(file_bytes)} bytes ---")
        prompt = user_prompt if user_prompt else "Bu görseli detaylıca Türkçe analiz et."
        if file_type == "pdf":
            # Extract text from the PDF and analyze it as plain text
            pdf_text = extract_pdf_text(file_bytes)
            if not pdf_text.strip():
                return "PDF içeriği okunamadı veya boş."
            messages = [
                {"role": "system", "content": "Sen ZenkaMind PDF analiz modülüsün. Türkçe analiz et."},
                {"role": "user", "content": prompt + "\n\nPDF İÇERİĞİ:\n" + pdf_text[:4000]}
            ]
            result = call_openai_text(messages, max_tokens=1500, temperature=0.4, model="gpt-4o-mini")
            return result or "PDF analiz edilemedi."
        else:
            # Image: GPT-4o mini vision call
            resp = requests.post(
                "https://api.openai.com/v1/chat/completions",
                headers={"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"},
                json={
                    "model": "gpt-4o-mini",
                    "max_tokens": 1500,
                    "messages": [
                        {
                            "role": "user",
                            "content": [
                                {"type": "text", "text": "Sen ZenkaMind görsel analiz modülüsün. Gördüğün her şeyi detaylı ve TÜRKÇE anlat.\n\n" + prompt},
                                {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{clean_b64}"}}
                            ]
                        }
                    ]
                },
                timeout=60
            )
            print(f"--- GPT Vision Status: {resp.status_code} ---")
            if resp.status_code != 200:
                print(f"--- GPT Vision Hata: {resp.text[:300]} ---")
                return f"Görsel analiz hatası: {resp.text[:200]}"
            result = resp.json()["choices"][0]["message"]["content"]
            result = sanitize_identity_leak(result)
            print("--- VISION BAŞARILI ---")
            return result
    except Exception as e:
        print(f"--- VISION HATASI: {e} ---")
        return f"Görsel analiz hatası: {str(e)}"
# -------------------- PERSONAL CONTEXT --------------------
def build_personal_context(user_memory: Optional[Dict[str, Any]]) -> str:
    if not user_memory:
        return ""
    parts = []
    if user_memory.get("name"):
        parts.append(f"- Kullanıcının adı: {user_memory['name']}")
    if user_memory.get("interests"):
        interests = ", ".join(user_memory["interests"][:5])
        parts.append(f"- İlgi alanları: {interests}")
    if user_memory.get("tone"):
        tone_map = {
            "formal": "resmi ve profesyonel",
            "casual": "samimi ve rahat",
            "technical": "teknik ve detaylı"
        }
        tone = tone_map.get(user_memory["tone"], user_memory["tone"])
        parts.append(f"- Tercih ettiği iletişim tonu: {tone}")
    if user_memory.get("last_topics"):
        topics = ", ".join(user_memory["last_topics"][:3])
        parts.append(f"- Son konuştuğu konular: {topics}")
    if not parts:
        return ""
    return "\n\nKULLANICI HAKKINDA:\n" + "\n".join(parts) + "\nBu bilgileri kullanarak kişiselleştirilmiş yanıtlar ver."
# -------------------- PREMIUM CHAT --------------------
CLAUDE_API_KEY = os.getenv("CLAUDE_API_KEY", "")
CLAUDE_MODEL = "claude-sonnet-4-6"
# Note: the Claude settings above are read from the environment but premium_chat
# currently routes everything through call_ai (OpenAI/Gemini/Groq).

async def premium_chat(message: str, history: list, user_memory: Optional[Dict[str, Any]] = None, model_config: Optional[Dict[str, Any]] = None) -> dict:
    """Runs the model selected by a premium user."""
    personal_context = build_personal_context(user_memory)
    api_model = model_config["api_model"] if model_config else DEFAULT_PREMIUM_MODEL
    max_tok = model_config.get("max_tokens", 2000) if model_config else 2000
    temp = model_config.get("temp", 0.6) if model_config else 0.6
    # Pick a system prompt based on the selected model
    model_id = model_config.get("_selected_model", "") if model_config else ""
    if "code" in str(model_id):
        system_prompt = CODE_SYSTEM_PROMPT + personal_context
    elif "reasoning" in str(model_id):
        system_prompt = REASONING_SYSTEM_PROMPT + personal_context
    else:
        system_prompt = CHAT_SYSTEM_PROMPT + personal_context
    messages_list = [{"role": "system", "content": system_prompt}]
    for item in history[-6:]:
        if isinstance(item, list) and len(item) == 2:
            user_msg, bot_msg = item
            if user_msg:
                messages_list.append({"role": "user", "content": str(user_msg)})
            if bot_msg:
                messages_list.append({"role": "assistant", "content": str(bot_msg)})
    messages_list.append({"role": "user", "content": message})
    text = call_ai(messages_list, max_tokens=max_tok, temperature=temp, model=api_model)
    text = sanitize_identity_leak(text)
    return {"response": text, "remaining": 999, "is_premium": True}
# -------------------- ROUTE --------------------
@app.post("/chat")  # endpoint path is an assumption
async def chat(body: ChatRequest, authorization: str = Header(None), request: Request = None):
    try:
        message = (body.message or "").strip()
        if not message:
            return ONBOARDING

        # ---------- USER ID & PREMIUM CHECK ----------
        user_id = body.user_id or (authorization or "anon")
        is_premium = is_premium_user(user_id)

        # ---------- MODEL RESOLUTION ----------
        model_config = resolve_model(body.model, is_premium)

        # Free user picked a paid model: block and point them to the purchase flow
        if model_config.get("blocked"):
            blocked_model = model_config.get("blocked_model", "")
            required_plan = model_config.get("required_plan", "pro")
            plan_name = "Pro" if required_plan == "pro" else "Enterprise"
            model_names = {
                "zenkamind-3.0-ultra": "ZenkaMind 3.0 Ultra",
                "zenkamind-3.0": "ZenkaMind 3.0",
                "zenkamind-2.4": "ZenkaMind 2.4",
                "zenkamind-vision": "ZenkaMind Vision",
                "zenkamind-code": "ZenkaMind Code",
                "zenkamind-reasoning": "ZenkaMind Reasoning",
                "zenkamind-turbo": "ZenkaMind Turbo",
            }
            display_name = model_names.get(blocked_model, blocked_model)
            return {
                "response": f"🔒 {display_name} modeli {plan_name} plan gerektirir.\n\n"
                            f"Bu modeli kullanmak için {plan_name} plana geçmen gerekiyor.\n\n"
                            f"Şu an ücretsiz olarak ZenkaMind 2.4 Mini ve ZenkaMind Nano modellerini kullanabilirsin.",
                "model_blocked": True,
                "required_plan": required_plan
            }

        api_model = model_config["api_model"]
        model_max_tokens = model_config.get("max_tokens", 1500)
        model_temp = model_config.get("temp", 0.6)
        print(f"[MODEL] user={user_id[:20]} | selected={body.model} | resolved={api_model} | premium={is_premium}")
        # ---------- IMAGE / PDF: GO STRAIGHT TO THE VISION PIPELINE ----------
        if body.image or body.pdf_file:
            print("!!! GÖRSEL TESPİT EDİLDİ - VISION ÇAĞRILIYOR !!!")
            target_file = body.image or body.pdf_file
            file_type = "image" if body.image else "pdf"
            vision_response = process_with_gemini_vision(
                user_prompt=message,
                file_b64=target_file,
                file_type=file_type
            )
            return {"response": vision_response}

        # ---------- MESSAGE LIMIT CHECK ----------
        limit_result = check_and_increment_message(user_id, is_premium)
        if not limit_result["allowed"]:
            return {
                "response": "🔒 Ücretsiz mesaj hakkın doldu (20/20).\n\nPremium'a geçerek sınırsız kullanmaya devam edebilirsin.",
                "limit_reached": True,
                "remaining": 0
            }

        # ---------- LOAD MEMORY ----------
        if user_id and user_id != "anon":
            sb_memory = load_memory_from_supabase(user_id)
            if sb_memory and body.user_memory:
                merged = dict(sb_memory)
                merged.update({k: v for k, v in body.user_memory.items() if v})
                body.user_memory = merged
            elif sb_memory:
                body.user_memory = sb_memory

        # ---------- LOG ----------
        print(f"[LOG] user={user_id[:20] if user_id else 'anon'} | msg={message[:100]}")

        # ---------- IDENTITY OVERRIDE ----------
        lower_msg = message.lower()
        if any(q in lower_msg for q in IDENTITY_QUESTIONS):
            return {"response": ZENKAMIND_IDENTITY_RESPONSE}

        # Greeting check removed; the model handles greetings naturally

        # ---------- AGENT MODE ----------
        agent_type = is_agent_request(message)
        if agent_type:
            print(f"[AGENT] type={agent_type} | user={user_id[:20]}")
            agent_result = await run_agent(agent_type, message, api_model, body.user_memory)
            agent_result = sanitize_identity_leak(agent_result)
            log_to_supabase(user_id, message, agent_result[:2000])
            return {"response": agent_result, "remaining": limit_result["remaining"], "agent_mode": True}
        # ---------- REALTIME QUESTION ----------
        if is_realtime_question(message):
            web_results = tavily_search(message, days=1, max_results=5)
            if not web_results:
                return {
                    "response": "Şu an güncel veriye erişemedim. Birkaç dakika sonra tekrar deneyin.",
                    "remaining": limit_result["remaining"]
                }
            info = "\n".join(r.get("content", "") for r in web_results)
            messages_list = [
                {"role": "system", "content": "Güncel finans verisini net ve kısa şekilde açıkla. Kaçamak yapma. Tahmin ekleme. TÜRKÇE yaz."},
                {"role": "user", "content": f"SORU: {message}\n\nVERİ:\n{info}"}
            ]
            text = call_ai(messages_list, max_tokens=300, temperature=0.1, model=api_model)
            text = sanitize_identity_leak(text)
            text = force_turkish_only(text)
            return {"response": text, "remaining": limit_result["remaining"]}

        # ---------- WEB SYNTHESIS (research) ----------
        if is_web_search_needed(message):
            web_results = tavily_search(message)
            total_chars = sum(len(r.get("content", "")) for r in web_results)
            if len(web_results) < MIN_WEB_RESULT or total_chars < MIN_WEB_CHARS:
                return {
                    "response": "Yeterli güncel veri bulunamadı. Daha dar bir soru sor.",
                    "remaining": limit_result["remaining"]
                }
            filtered = []
            for r in web_results:
                date_field = r.get("published_date") or r.get("date") or ""
                score = score_source(r.get("content", ""), date_field)
                if score >= 2:
                    filtered.append(r.get("content", ""))
            info_block = "\n".join(filtered)
            synthesis = run_web_synthesis(message, info_block)
            final_output = f"""SORU: {message}
SENTEZ ÖZET:
{synthesis.get('summary', '')}
ÖNEMLİ NOKTALAR:
{chr(10).join(synthesis.get('key_points', []))}
ZENKAMIND YORUMU:
{synthesis.get('final_comment', '')}"""
            messages_list = [
                {"role": "system", "content": "Web sentez sonuçlarını düzenli ve akıcı Türkçe ile sun. TÜRKÇE yaz."},
                {"role": "user", "content": final_output}
            ]
            text = call_ai(messages_list, max_tokens=1800, temperature=0.3, model=api_model)
            text = sanitize_identity_leak(text)
            text = force_turkish_only(text)
            return {"response": text, "remaining": limit_result["remaining"]}
        # ---------- NORMAL CHAT ----------
        # Premium users get the model they selected
        if is_premium:
            return await premium_chat(message, body.history or [], body.user_memory, model_config)

        # Build a personal context block from the user's memory
        personal_context = build_personal_context(body.user_memory)

        # Model-specific system prompt
        if body.model == "zenkamind-code":
            system_prompt = CODE_SYSTEM_PROMPT + personal_context
        elif body.model == "zenkamind-reasoning":
            system_prompt = REASONING_SYSTEM_PROMPT + personal_context
        else:
            system_prompt = CHAT_SYSTEM_PROMPT + personal_context

        # Append the language hint, then the last 6 turns of history
        lang_p = LANG_PROMPTS.get(body.lang or "tr", "")
        if lang_p:
            system_prompt = system_prompt + "\n\n" + lang_p
        messages_list = [{"role": "system", "content": system_prompt}]
        if body.history:
            for item in body.history[-6:]:
                if isinstance(item, list) and len(item) == 2:
                    user_msg, bot_msg = item
                    if user_msg:
                        messages_list.append({"role": "user", "content": str(user_msg)})
                    if bot_msg:
                        messages_list.append({"role": "assistant", "content": str(bot_msg)})
        messages_list.append({"role": "user", "content": message})

        # Free users also use the model they selected (free-plan models are allowed)
        text = call_ai(messages_list, max_tokens=model_max_tokens, temperature=model_temp, model=api_model)
        text = sanitize_identity_leak(text)
        text = force_turkish_only(text)
        print(f"[LOG] response={text[:100]}")
        if user_id and user_id != 'anon' and body.user_memory:
            save_memory_to_supabase(user_id, body.user_memory)
        log_to_supabase(user_id, message, text)
        return {"response": text, "remaining": limit_result["remaining"]}
    except Exception as e:
        traceback.print_exc()
        return {"response": "Sistem hatası oluştu. Lütfen tekrar deneyin."}
# -------------------- SHOPIER WEBHOOK --------------------
SHOPIER_SECRET = os.getenv("SHOPIER_SECRET", "zenkamind2024")  # set this on Hugging Face

@app.post("/webhook/shopier")  # endpoint path is an assumption
async def shopier_webhook(request: Request):
    try:
        body = await request.json()
        print(f"📦 Shopier Webhook: {json.dumps(body)[:500]}")
        # Fields sent by Shopier (may vary; verify against the logs)
        email = (
            body.get("buyer_email") or
            body.get("email") or
            body.get("buyer", {}).get("email") or
            body.get("customer_email") or
            ""
        ).strip().lower()
        order_id = str(body.get("order_id") or body.get("id") or "")
        status = str(body.get("status") or body.get("payment_status") or "").lower()
        print(f"📧 Email: {email} | Order: {order_id} | Status: {status}")
        # Mark the buyer as premium when the payment succeeded.
        # Shopier sends statuses such as "approved", "completed" or "paid".
        if email and (status in ["approved", "completed", "paid", "success", "1"] or status == ""):
            add_premium_user(email, order_id)
            return {"status": "ok", "message": f"{email} premium yapıldı"}
        return {"status": "ignored", "message": f"Status: {status}"}
    except Exception as e:
        print(f"Webhook error: {e}")
        return {"status": "error", "message": str(e)}
@app.get("/memory/{user_id}")  # endpoint path is an assumption
async def get_memory(user_id: str):
    memory = load_memory_from_supabase(user_id)
    return {"user_id": user_id, "memory": memory}
# -------------------- STREAMING ENDPOINT --------------------
from fastapi.responses import StreamingResponse
import asyncio

@app.post("/chat/stream")  # endpoint path is an assumption
async def chat_stream(body: ChatRequest, authorization: str = Header(None)):
    async def generate():
        try:
            message = (body.message or "").strip()
            user_id = body.user_id or "anon"
            is_premium = is_premium_user(user_id)

            # Limit check
            limit_result = check_and_increment_message(user_id, is_premium)
            if not limit_result["allowed"]:
                yield f"data: {json.dumps({'text': '🔒 Ücretsiz mesaj hakkın doldu. Premium al!', 'done': True})}\n\n"
                return

            # Identity check
            if any(q in message.lower() for q in IDENTITY_QUESTIONS):
                for word in ZENKAMIND_IDENTITY_RESPONSE.split():
                    yield f"data: {json.dumps({'text': word + ' ', 'done': False})}\n\n"
                    await asyncio.sleep(0.05)
                yield f"data: {json.dumps({'text': '', 'done': True})}\n\n"
                return

            # Resolve the model; mirror the non-streaming route by refusing blocked paid models
            model_config = resolve_model(body.model, is_premium)
            if model_config.get("blocked"):
                lock_msg = json.dumps({'text': '🔒 Bu model için ücretli plan gerekiyor.', 'done': True})
                yield f"data: {lock_msg}\n\n"
                return
            api_model = model_config["api_model"]
            max_tok = model_config.get("max_tokens", 1500)
            temp = model_config.get("temp", 0.6)

            # Load memory
            if user_id != "anon":
                sb_memory = load_memory_from_supabase(user_id)
                if sb_memory:
                    body.user_memory = sb_memory

            # Agent mode
            agent_type = is_agent_request(message)
            if agent_type:
                agent_steps = {
                    "research": ["🔍 Araştırılıyor...", "✅ Kaynaklar bulundu.", "🧠 Analiz ediliyor...", "✍️ Rapor yazılıyor..."],
                    "email": ["🔍 Araştırılıyor...", "🧠 Analiz ediliyor...", "✉️ Mail yazılıyor..."],
                    "code": ["💻 Kod yazılıyor...", "🧪 Test ekleniyor...", "✅ Hazır!"],
                    "plan": ["🎯 Hedef analiz ediliyor...", "📋 Plan oluşturuluyor...", "📅 Takip sistemi kuruluyor..."]
                }
                steps = agent_steps.get(agent_type, ["🤖 Çalışıyor..."])
                for step in steps:
                    # Build the payload outside the f-string (backslashes are not allowed
                    # inside f-string expressions before Python 3.12)
                    payload = json.dumps({'text': step + '\n', 'done': False})
                    yield f"data: {payload}\n\n"
                    await asyncio.sleep(0.8)
                separator = json.dumps({'text': '\n---\n\n', 'done': False})
                yield f"data: {separator}\n\n"
                result = await run_agent(agent_type, message, api_model, body.user_memory)
                # Stream the final section word by word
                result_text = result.split("---")[-1].strip() if "---" in result else result
                words = result_text.split(" ")
                for word in words:
                    yield f"data: {json.dumps({'text': word + ' ', 'done': False})}\n\n"
                    await asyncio.sleep(0.02)
                yield f"data: {json.dumps({'text': '', 'done': True})}\n\n"
                return
            # Normal chat: stream from OpenAI
            personal_context = build_personal_context(body.user_memory)
            lang_p = LANG_PROMPTS.get(body.lang or "tr", "")
            system_prompt = CHAT_SYSTEM_PROMPT + personal_context
            if lang_p:
                system_prompt += "\n\n" + lang_p
            messages_list = [{"role": "system", "content": system_prompt}]
            if body.history:
                for item in body.history[-6:]:
                    if isinstance(item, list) and len(item) == 2:
                        if item[0]:
                            messages_list.append({"role": "user", "content": str(item[0])})
                        if item[1]:
                            messages_list.append({"role": "assistant", "content": str(item[1])})
            messages_list.append({"role": "user", "content": message})

            # OpenAI streaming (SSE chunks are relayed as they arrive)
            resp = requests.post(
                "https://api.openai.com/v1/chat/completions",
                headers={"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"},
                json={"model": api_model, "messages": messages_list, "max_tokens": max_tok, "temperature": temp, "stream": True},
                stream=True, timeout=60
            )
            full_text = ""
            for line in resp.iter_lines():
                if line:
                    line = line.decode("utf-8")
                    if line.startswith("data: "):
                        data = line[6:]
                        if data == "[DONE]":
                            break
                        try:
                            chunk = json.loads(data)
                            delta = chunk["choices"][0]["delta"].get("content", "")
                            if delta:
                                full_text += delta
                                yield f"data: {json.dumps({'text': delta, 'done': False})}\n\n"
                        except Exception:
                            pass
            full_text = sanitize_identity_leak(full_text)
            log_to_supabase(user_id, message, full_text)
            if user_id != "anon" and body.user_memory:
                save_memory_to_supabase(user_id, body.user_memory)
            yield f"data: {json.dumps({'text': '', 'done': True, 'remaining': limit_result['remaining']})}\n\n"
        except Exception as e:
            print(f"Stream error: {e}")
            yield f"data: {json.dumps({'text': 'Hata oluştu: ' + str(e), 'done': True})}\n\n"

    return StreamingResponse(generate(), media_type="text/event-stream",
                             headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"})
# Premium status lookup (used by the frontend)
@app.get("/premium/check")  # endpoint path is an assumption
async def check_premium(email: str):
    premium = is_premium_user(email)
    return {"email": email, "is_premium": premium}
# -------------------- ONBOARDING --------------------
@app.get("/onboarding")  # endpoint path is an assumption
async def onboarding():
    return ONBOARDING
# -------------------- VISION PDF ENDPOINT --------------------
@app.post("/vision/pdf")  # endpoint path is an assumption
async def vision_pdf(request: Request, authorization: str = Header(None)):
    body = await request.json()
    pdf_base64 = body.get("pdf_base64")
    if not pdf_base64:
        raise HTTPException(status_code=400, detail="PDF verisi yok")
    try:
        pdf_bytes = base64.b64decode(fix_base64(pdf_base64))
        text = extract_pdf_text(pdf_bytes)
        return {
            "text": text.strip(),
            "pages": text.count("\n"),  # note: counts extracted lines, not actual PDF pages
            "engine": "ZenkaMind PDF Analyzer"
        }
    except Exception:
        raise HTTPException(status_code=500, detail="PDF analiz hatası")
# -------------------- RUN --------------------
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)