Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,16 +1,16 @@
|
|
| 1 |
-
import asyncio
|
| 2 |
-
import threading
|
| 3 |
-
import os
|
| 4 |
-
from dotenv import load_dotenv
|
| 5 |
import requests
|
| 6 |
-
from fastapi import FastAPI
|
| 7 |
from pydantic import BaseModel
|
| 8 |
from huggingface_hub import InferenceClient
|
| 9 |
import uvicorn
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
from telegram import Update
|
| 11 |
from telegram.ext import Application, CommandHandler, MessageHandler, filters, CallbackContext
|
| 12 |
|
| 13 |
-
# Load environment variables
|
| 14 |
load_dotenv()
|
| 15 |
TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
|
| 16 |
|
|
@@ -20,54 +20,45 @@ app = FastAPI()
|
|
| 20 |
# Hugging Face Model Client
|
| 21 |
client = InferenceClient("RahulGanapathy/MisInfo-ChatBot")
|
| 22 |
|
| 23 |
-
# Request schema for
|
| 24 |
class RequestData(BaseModel):
|
| 25 |
text: str
|
| 26 |
|
| 27 |
@app.post("/predict")
|
| 28 |
async def predict(data: RequestData):
|
| 29 |
-
"""
|
| 30 |
-
API endpoint for AI response.
|
| 31 |
-
"""
|
| 32 |
try:
|
| 33 |
response = client.text_generation(prompt=data.text, max_new_tokens=100)
|
| 34 |
return {"response": response}
|
| 35 |
except Exception as e:
|
| 36 |
-
|
| 37 |
|
| 38 |
# π Telegram Bot Integration
|
| 39 |
async def start(update: Update, context: CallbackContext):
|
| 40 |
-
"""
|
| 41 |
-
await update.message.reply_text("Hello! Send me a message and I'll
|
| 42 |
|
| 43 |
async def handle_message(update: Update, context: CallbackContext):
|
| 44 |
-
"""Handles user messages and
|
| 45 |
user_message = update.message.text
|
| 46 |
-
response = requests.post(
|
| 47 |
-
"http://0.0.0.0:8000/predict",
|
| 48 |
-
json={"text": user_message},
|
| 49 |
-
)
|
| 50 |
-
|
| 51 |
-
if response.status_code == 200:
|
| 52 |
-
result = response.json()["response"]
|
| 53 |
-
else:
|
| 54 |
-
result = "Error: Unable to get response from AI."
|
| 55 |
|
|
|
|
| 56 |
await update.message.reply_text(result)
|
| 57 |
|
| 58 |
-
# π Start Telegram Bot
|
| 59 |
async def run_telegram_bot():
|
|
|
|
| 60 |
application = Application.builder().token(TELEGRAM_BOT_TOKEN).build()
|
| 61 |
application.add_handler(CommandHandler("start", start))
|
| 62 |
application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))
|
| 63 |
-
|
| 64 |
-
await application.run_polling()
|
| 65 |
|
| 66 |
-
#
|
| 67 |
-
|
| 68 |
-
async def startup_event():
|
| 69 |
-
asyncio.create_task(run_telegram_bot())
|
| 70 |
|
| 71 |
-
#
|
| 72 |
-
|
| 73 |
uvicorn.run(app, host="0.0.0.0", port=8000)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import requests
|
| 2 |
+
from fastapi import FastAPI
|
| 3 |
from pydantic import BaseModel
|
| 4 |
from huggingface_hub import InferenceClient
|
| 5 |
import uvicorn
|
| 6 |
+
import asyncio
|
| 7 |
+
import threading
|
| 8 |
+
import os
|
| 9 |
+
from dotenv import load_dotenv
|
| 10 |
from telegram import Update
|
| 11 |
from telegram.ext import Application, CommandHandler, MessageHandler, filters, CallbackContext
|
| 12 |
|
| 13 |
+
# Load environment variables
|
| 14 |
load_dotenv()
|
| 15 |
TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
|
| 16 |
|
|
|
|
| 20 |
# Hugging Face Model Client
|
| 21 |
client = InferenceClient("RahulGanapathy/MisInfo-ChatBot")
|
| 22 |
|
| 23 |
+
# Request schema for Telegram API
|
| 24 |
class RequestData(BaseModel):
    """Payload accepted by the /predict endpoint."""

    # Free-form user text forwarded to the language model.
    text: str
|
| 26 |
|
| 27 |
@app.post("/predict")
async def predict(data: RequestData):
    """API endpoint for Telegram bot and Gradio UI."""
    try:
        generated = client.text_generation(prompt=data.text, max_new_tokens=100)
    except Exception as exc:
        # Surface model/client failures as a normal JSON payload rather
        # than an HTTP error, so callers always get a "response" key.
        return {"response": f"Error: {str(exc)}"}
    return {"response": generated}
|
| 35 |
|
| 36 |
# 🚀 Telegram Bot Integration
|
| 37 |
async def start(update: Update, context: CallbackContext):
    """Greet the user when the /start command is issued."""
    greeting = "Hello! Send me a message and I'll respond!"
    await update.message.reply_text(greeting)
|
| 40 |
|
| 41 |
async def handle_message(update: Update, context: CallbackContext):
    """Handles user messages and gets response from Hugging Face API.

    Forwards the incoming text to the local /predict FastAPI endpoint and
    replies to the chat with the model output, or with a fixed error
    message if the API call fails or returns a non-200 status.
    """
    user_message = update.message.text
    try:
        # requests.post is blocking; run it in a worker thread so the
        # Telegram event loop is not stalled while waiting on the API.
        # A timeout keeps the handler from hanging if the server is down.
        # 127.0.0.1 (not 0.0.0.0, which is a bind address) is the correct
        # loopback address for connecting to the co-located FastAPI server.
        response = await asyncio.to_thread(
            requests.post,
            "http://127.0.0.1:8000/predict",
            json={"text": user_message},
            timeout=30,
        )
    except requests.RequestException:
        # Network failure / timeout: fall back to the same error text as
        # a bad status, instead of crashing the handler.
        result = "Error: Unable to get response from AI."
    else:
        if response.status_code == 200:
            result = response.json()["response"]
        else:
            result = "Error: Unable to get response from AI."
    await update.message.reply_text(result)
|
| 48 |
|
|
|
|
| 49 |
async def run_telegram_bot():
    """Runs the Telegram bot using asyncio properly.

    Builds the Application with the token loaded from the environment,
    registers the /start command and the plain-text message handler,
    then schedules polling on the current event loop.
    """
    application = Application.builder().token(TELEGRAM_BOT_TOKEN).build()
    # /start -> greeting; any non-command text -> handle_message.
    application.add_handler(CommandHandler("start", start))
    application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))

    # Start polling as an async task
    # NOTE(review): the Task returned by create_task is neither stored nor
    # awaited, so nothing holds a strong reference to it and this coroutine
    # returns immediately — confirm polling actually stays alive.
    # NOTE(review): in python-telegram-bot v20+, run_polling() is a blocking
    # convenience method, not a coroutine; verify the installed PTB version
    # supports passing it to create_task.
    asyncio.create_task(application.run_polling())
|
|
|
|
|
|
|
| 57 |
|
| 58 |
+
# 🚀 Start FastAPI in a separate thread
|
| 59 |
+
def run_fastapi():
    """Serve the FastAPI app with uvicorn on all interfaces, port 8000."""
    bind_host = "0.0.0.0"
    bind_port = 8000
    uvicorn.run(app, host=bind_host, port=bind_port)
|
| 61 |
+
|
| 62 |
+
# Run FastAPI & Telegram Bot
|
| 63 |
+
if __name__ == "__main__":
|
| 64 |
+
threading.Thread(target=run_fastapi, daemo
|