Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -2,14 +2,15 @@ import os
|
|
| 2 |
from fastapi import FastAPI, Request
|
| 3 |
from pinecone import Pinecone
|
| 4 |
from groq import Groq
|
| 5 |
-
import httpx
|
|
|
|
| 6 |
|
| 7 |
# 1. Configuration & Clients
|
| 8 |
# Use Hugging Face Secrets for these!
|
| 9 |
PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY")
|
| 10 |
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
|
| 11 |
TELEGRAM_TOKEN = os.environ.get("TELEGRAM_TOKEN")
|
| 12 |
-
TELEGRAM_URL = f"https://
|
| 13 |
|
| 14 |
EMBED_MODEL= os.environ.get("EMBED_MODEL")
|
| 15 |
GROQ_MODEL = os.environ.get("GROQ_MODEL")
|
|
@@ -21,6 +22,11 @@ groq_client = Groq(api_key=GROQ_API_KEY)
|
|
| 21 |
|
| 22 |
app = FastAPI()
|
| 23 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
# 2. The Core AI Logic
|
| 25 |
async def get_ai_response(user_query: str):
|
| 26 |
# Vectorize query using Pinecone Inference
|
|
@@ -39,8 +45,7 @@ async def get_ai_response(user_query: str):
|
|
| 39 |
|
| 40 |
retrieved_context = "\n".join([res.metadata['original_text'] for res in search_results.matches])
|
| 41 |
|
| 42 |
-
|
| 43 |
-
# We use facts from the profile: Islamic banking, based in Mukalla [cite: 15, 6]
|
| 44 |
prompt = f"""
|
| 45 |
{PROMPT}
|
| 46 |
|
|
@@ -55,10 +60,11 @@ async def get_ai_response(user_query: str):
|
|
| 55 |
messages=[{"role": "user", "content": prompt}],
|
| 56 |
model=GROQ_MODEL,
|
| 57 |
temperature=0.1,
|
| 58 |
-
max_completion_tokens=
|
| 59 |
top_p=0.9,
|
| 60 |
)
|
| 61 |
-
|
|
|
|
| 62 |
|
| 63 |
# 3. The Webhook Endpoint
|
| 64 |
@app.post("/webhook")
|
|
@@ -72,16 +78,15 @@ async def telegram_webhook(request: Request):
|
|
| 72 |
if user_text:
|
| 73 |
# Get the intelligent response
|
| 74 |
ai_answer = await get_ai_response(user_text)
|
| 75 |
-
print("1----------TELEGRAM_TOKEN:", TELEGRAM_TOKEN)
|
| 76 |
-
print("2---------TELEGRAM_URL:", TELEGRAM_URL)
|
| 77 |
# Send back to Telegram
|
| 78 |
-
async with httpx.AsyncClient(
|
| 79 |
await client.post(
|
| 80 |
-
|
| 81 |
headers={"Host": "api.telegram.org"},
|
| 82 |
json={
|
| 83 |
"chat_id": chat_id,
|
| 84 |
-
"text": ai_answer
|
|
|
|
| 85 |
}
|
| 86 |
)
|
| 87 |
|
|
|
|
| 2 |
from fastapi import FastAPI, Request
|
| 3 |
from pinecone import Pinecone
|
| 4 |
from groq import Groq
|
| 5 |
+
import httpx
|
| 6 |
+
import re
|
| 7 |
|
| 8 |
# 1. Configuration & Clients
|
| 9 |
# Use Hugging Face Secrets for these!
|
| 10 |
PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY")
|
| 11 |
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
|
TELEGRAM_TOKEN = os.environ.get("TELEGRAM_TOKEN")
# Use the canonical hostname, not a raw Telegram IP: httpx validates the TLS
# certificate against the URL host, and Telegram's certificate is issued for
# api.telegram.org — an IP-based https:// URL fails hostname verification
# (a "Host:" request header does not change SNI or certificate checking).
TELEGRAM_URL = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendMessage"
|
| 14 |
|
| 15 |
EMBED_MODEL= os.environ.get("EMBED_MODEL")
|
| 16 |
GROQ_MODEL = os.environ.get("GROQ_MODEL")
|
|
|
|
| 22 |
|
| 23 |
app = FastAPI()
|
| 24 |
|
| 25 |
def clean_ai_response(text: str):
    """Strip <think>...</think> reasoning blocks from a model reply.

    Removes every such span, tags included, then trims leading/trailing
    whitespace so only the user-facing answer remains.
    """
    # DOTALL lets '.' match newlines, so multi-line reasoning is removed too;
    # the lazy '.*?' keeps each match scoped to its own closing tag.
    return re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL).strip()
|
| 29 |
+
|
| 30 |
# 2. The Core AI Logic
|
| 31 |
async def get_ai_response(user_query: str):
|
| 32 |
# Vectorize query using Pinecone Inference
|
|
|
|
| 45 |
|
| 46 |
retrieved_context = "\n".join([res.metadata['original_text'] for res in search_results.matches])
|
| 47 |
|
| 48 |
+
|
|
|
|
| 49 |
prompt = f"""
|
| 50 |
{PROMPT}
|
| 51 |
|
|
|
|
| 60 |
messages=[{"role": "user", "content": prompt}],
|
| 61 |
model=GROQ_MODEL,
|
| 62 |
temperature=0.1,
|
| 63 |
+
max_completion_tokens=800,
|
| 64 |
top_p=0.9,
|
| 65 |
)
|
| 66 |
+
ai_response = completion.choices[0].message.content
|
| 67 |
+
return clean_ai_response(ai_response)
|
| 68 |
|
| 69 |
# 3. The Webhook Endpoint
|
| 70 |
@app.post("/webhook")
|
|
|
|
| 78 |
if user_text:
|
| 79 |
# Get the intelligent response
|
| 80 |
ai_answer = await get_ai_response(user_text)
|
|
|
|
|
|
|
| 81 |
# Send back to Telegram
|
| 82 |
+
async with httpx.AsyncClient() as client:
|
| 83 |
await client.post(
|
| 84 |
+
TELEGRAM_URL,
|
| 85 |
headers={"Host": "api.telegram.org"},
|
| 86 |
json={
|
| 87 |
"chat_id": chat_id,
|
| 88 |
+
"text": ai_answer,
|
| 89 |
+
"parse_mode": "Markdown"
|
| 90 |
}
|
| 91 |
)
|
| 92 |
|