# Source: tyagonzales66 — commit e2aca56 (verified), message: "bon"
import hmac
import os
import time
from typing import Any, Dict

import httpx
import requests
from fastapi import FastAPI, HTTPException, Request
from openai import OpenAI
app = FastAPI()

# NVIDIA inference API configuration.
BOT_USERNAME = "@DiscussionBot"
NVIDIA_API_URL = "https://integrate.api.nvidia.com/v1"
NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY")
MODEL = "mistralai/mixtral-8x7b-instruct-v0.1"

# Fail fast at startup when required credentials are missing.
if not NVIDIA_API_KEY:
    raise ValueError("NVIDIA_API_KEY environment variable is not set.")

# Hugging Face token used to post replies back to discussions.
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable is not set.")

# Initialise the NVIDIA client (OpenAI-compatible endpoint).
try:
    client = OpenAI(
        base_url=NVIDIA_API_URL,
        api_key=NVIDIA_API_KEY,
        # NOTE: the `proxies=` kwarg was deprecated in httpx 0.26 and removed
        # in 0.28, where passing it raises TypeError. No proxy is needed, so
        # only the request timeout is configured here.
        http_client=httpx.Client(timeout=30.0),
    )
except Exception as e:
    # Chain the original exception so the root cause stays in the traceback.
    raise ValueError(f"Failed to initialize NVIDIA API client: {str(e)}") from e
@app.get("/")
async def root(request: Request) -> Dict[str, Any]:
    """Answer GET requests with a small JSON summary of the request.

    Serves as a liveness probe; the bot itself only reacts to POSTs.
    """
    headers = request.headers
    payload = {
        "method": request.method,
        "host": headers.get("host", "unknown"),
        "user_agent": headers.get("user-agent", "unknown"),
        "query_test": request.query_params.get("test", None),
        "message": "Webhook server is running. Use POST to interact with the bot.",
    }
    return payload
@app.post("/")
async def webhook(request: Request) -> Dict[str, Any]:
    """
    Handle webhook POSTs and reply to discussion comments mentioning the bot.

    Flow: validate the shared secret, check that the event is a newly created
    discussion comment containing BOT_USERNAME, generate a reply through the
    NVIDIA API, then post it back to the discussion with exponential backoff
    on rate limits.

    Returns:
        {"success": True} when a reply was posted, {"success": False} when
        the event did not match the trigger conditions.

    Raises:
        HTTPException: 403 on a bad or missing secret, 500 on inference or
            posting failures, 429 when rate-limit retries are exhausted.
    """
    try:
        # Fail closed: the original `!=` comparison let requests through when
        # WEBHOOK_SECRET was unset (None on both sides). Also use a
        # constant-time comparison to avoid timing leaks, and return 403
        # (unauthorized) rather than 400 (malformed).
        expected_secret = os.getenv("WEBHOOK_SECRET")
        provided_secret = request.headers.get("X-Webhook-Secret")
        if (
            not expected_secret
            or provided_secret is None
            or not hmac.compare_digest(provided_secret, expected_secret)
        ):
            raise HTTPException(status_code=403, detail="Secret incorrect")

        data = await request.json()
        event = data.get("event", {})
        if (
            event.get("action") == "create"
            and event.get("scope") == "discussion.comment"
            and BOT_USERNAME in data.get("comment", {}).get("content", "")
        ):
            # Build the prompt for the NVIDIA inference API.
            messages = [
                {
                    "role": "user",
                    "content": f"Faites comme si vous étiez un robot qui répond aux discussions sur l'apprentissage automatique et répondez au commentaire suivant :\n{data['comment']['content']}"
                }
            ]
            # Stream the completion and accumulate the generated text.
            try:
                completion = client.chat.completions.create(
                    model=MODEL,
                    messages=messages,
                    temperature=0.5,
                    top_p=1,
                    max_tokens=100,
                    stream=True,
                )
                continuation_text = ""
                for chunk in completion:
                    if chunk.choices[0].delta.content is not None:
                        continuation_text += chunk.choices[0].delta.content
                continuation_text = continuation_text.strip()
                if not continuation_text:
                    raise ValueError("No text generated by NVIDIA API")
            except Exception as e:
                raise HTTPException(status_code=500, detail=f"Inference failed: {str(e)}")

            # Post the reply, retrying with exponential backoff on 429s.
            comment_url = data["discussion"]["url"]["api"] + "/comment"
            for attempt in range(5):  # at most 5 attempts
                try:
                    comment_response = requests.post(
                        comment_url,
                        headers={
                            "Authorization": f"Bearer {HF_TOKEN}",
                            "Content-Type": "application/json",
                        },
                        json={"comment": continuation_text},
                        timeout=30,  # never hang the webhook on a stuck POST
                    )
                    comment_response.raise_for_status()
                    # raise_for_status() passed, so the comment was accepted;
                    # the original's extra "newMessage" branch returned the
                    # same value either way and is dropped as redundant.
                    return {"success": True}
                except requests.exceptions.HTTPError as e:
                    # Retry only on rate limiting; inspect the real status
                    # code instead of substring-matching the message text.
                    status = e.response.status_code if e.response is not None else None
                    if status == 429:
                        time.sleep((2 ** attempt) * 10)  # 10s, 20s, 40s, ...
                        continue
                    raise HTTPException(status_code=500, detail=f"Failed to post comment: {str(e)}")
            raise HTTPException(status_code=429, detail="Rate limit exceeded after retries")
        # Event did not match the trigger conditions; nothing to do.
        return {"success": False}
    except HTTPException as he:
        raise he
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Unexpected error: {str(e)}")
if __name__ == "__main__":
    # Run the ASGI app with uvicorn when this file is executed directly.
    import uvicorn

    bind_host, bind_port = "0.0.0.0", 7860
    uvicorn.run(app, host=bind_host, port=bind_port)