Spaces:
Sleeping
Sleeping
File size: 3,285 Bytes
4883af7 4a198c4 4883af7 4a198c4 4883af7 b675fc7 6c64035 b675fc7 4883af7 4a198c4 4883af7 b675fc7 4883af7 b44f777 4883af7 4a198c4 4883af7 6c64035 4883af7 ea9eb9f 4883af7 6c64035 4883af7 b675fc7 48fdf51 4a198c4 48fdf51 4883af7 4a198c4 4883af7 2312098 82bca33 4a198c4 82bca33 4a198c4 82bca33 4883af7 09955ab b675fc7 800fd8a | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 | import os
from fastapi import FastAPI, Request
from pinecone import Pinecone
from groq import Groq
import httpx
import re
# 1. Configuration & Clients
# Use Hugging Face Secrets for these!
PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY")
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
TELEGRAM_TOKEN = os.environ.get("TELEGRAM_TOKEN")
# NOTE(review): hardcoded Telegram IP instead of api.telegram.org — apparently a
# DNS workaround (see the /dns-test endpoint). Callers must send a
# "Host: api.telegram.org" header and disable TLS verification, because the
# certificate will not match the bare IP. Revisit if DNS starts resolving.
TELEGRAM_URL = f"https://149.154.167.220/bot{TELEGRAM_TOKEN}/sendMessage"
# Model identifiers and the system prompt are injected via environment, so they
# can be swapped without a code change.
EMBED_MODEL= os.environ.get("EMBED_MODEL")
GROQ_MODEL = os.environ.get("GROQ_MODEL")
PROMPT = os.environ.get("PROMPT")
# Clients are created once at import time and shared by all requests.
pc = Pinecone(api_key=PINECONE_API_KEY)
index = pc.Index("customerserviceindex")
groq_client = Groq(api_key=GROQ_API_KEY)
app = FastAPI()
def clean_ai_response(text: str) -> str:
    """Strip <think>...</think> reasoning spans (tags included) and trim whitespace.

    Some models emit their chain-of-thought wrapped in <think> tags; the end
    user should only ever see the final answer.
    """
    thought_pattern = re.compile(r'<think>.*?</think>', re.DOTALL)
    return thought_pattern.sub('', text).strip()
# 2. The Core AI Logic
async def get_ai_response(user_query: str) -> str:
    """RAG pipeline: embed the query, retrieve bank context, ask the LLM.

    Steps: (1) embed the user query with Pinecone's hosted inference,
    (2) fetch the three nearest knowledge-base chunks, (3) build a prompt
    from the env-configured PROMPT plus retrieved context, (4) ask the Groq
    model and return the answer with any <think> spans stripped.
    """
    # Vectorize the query via Pinecone Inference.
    embedding_response = pc.inference.embed(
        model=EMBED_MODEL,
        inputs=[user_query],
        parameters={"input_type": "query"},
    )
    # Nearest-neighbour search over the bank knowledge base.
    hits = index.query(
        vector=embedding_response[0].values,
        top_k=3,
        include_metadata=True,
    )
    # assumes every match carries an 'original_text' metadata field — TODO confirm
    context_chunks = [hit.metadata['original_text'] for hit in hits.matches]
    retrieved_context = "\n".join(context_chunks)
    prompt = f"""
{PROMPT}
Message:{user_query}
Retrieved Context:{retrieved_context}
Final Answer:
"""
    # Low temperature keeps customer-service answers deterministic.
    completion = groq_client.chat.completions.create(
        model=GROQ_MODEL,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.1,
        max_completion_tokens=800,
        top_p=0.9,
    )
    raw_answer = completion.choices[0].message.content
    return clean_ai_response(raw_answer)
# 3. The Webhook Endpoint
@app.post("/webhook")
async def telegram_webhook(request: Request):
    """Handle a Telegram update: answer incoming text messages via the AI pipeline.

    Always returns {"status": "ok"}: a non-2xx response would make Telegram
    re-deliver the same update in a retry loop, so delivery failures are
    logged and swallowed rather than surfaced as HTTP errors.
    """
    data = await request.json()
    if "message" in data:
        chat_id = data["message"]["chat"]["id"]
        user_text = data["message"].get("text", "")
        if user_text:
            # Get the intelligent response
            ai_answer = await get_ai_response(user_text)
            # Telegram rejects sendMessage payloads with text > 4096 chars.
            ai_answer = ai_answer[:4096]
            # Send back to Telegram. TELEGRAM_URL targets a hardcoded IP
            # (DNS workaround), so we spoof the Host header and must skip
            # certificate verification — the cert cannot match a bare IP.
            try:
                async with httpx.AsyncClient(verify=False) as client:
                    await client.post(
                        TELEGRAM_URL,
                        headers={"Host": "api.telegram.org"},
                        json={
                            "chat_id": chat_id,
                            "text": ai_answer,
                            "parse_mode": "Markdown"
                        }
                    )
            except httpx.HTTPError as exc:
                # Best-effort delivery: acknowledge the update anyway so
                # Telegram does not keep retrying it.
                print(f"Failed to deliver reply to chat {chat_id}: {exc}")
    return {"status": "ok"}
@app.get("/")
async def root():
    """Liveness probe: confirms the backend is deployed and serving."""
    status_message = "Hadhramout Bank AI Backend is Live"
    return {"message": status_message}
@app.post("/test")
async def test_webhook(request: Request):
    """Debug endpoint: run the AI pipeline directly on a posted message.

    Expects a Telegram-shaped JSON body ({"message": {"text": ...}}) and
    returns the raw answer without going through Telegram delivery.
    """
    payload = await request.json()
    answer = await get_ai_response(payload["message"]["text"])
    return {"response": answer}
import socket
@app.get("/dns-test")
async def dns_test():
    """Diagnostic: report whether this host can resolve api.telegram.org.

    Used to decide if the hardcoded-IP workaround in TELEGRAM_URL is still
    needed. Broad except is deliberate — any resolution failure should be
    reported as JSON, never as a 500.
    """
    try:
        resolved_ip = socket.gethostbyname("api.telegram.org")
    except Exception as exc:
        return {"error": str(exc)}
    return {"resolved_ip": resolved_ip}