RahulGanapathy committed on
Commit
e95d517
·
verified ·
1 Parent(s): 6bbb2e7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -49
app.py CHANGED
@@ -1,68 +1,89 @@
 
 
1
  import requests
 
 
2
  from fastapi import FastAPI
3
  from pydantic import BaseModel
4
- from huggingface_hub import InferenceClient
5
- import uvicorn
6
- import asyncio
7
- import os
8
- from dotenv import load_dotenv
9
- from telegram import Update
10
- from telegram.ext import Application, CommandHandler, MessageHandler, filters, CallbackContext
11
-
12
- # Load environment variables
13
- load_dotenv()
14
- TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
15
 
16
  # Initialize FastAPI
17
  app = FastAPI()
18
 
19
- # Hugging Face Model Client
20
- client = InferenceClient("RahulGanapathy/MisInfo-ChatBot")
 
 
 
 
21
 
22
- # Request schema for Telegram API
23
- class RequestData(BaseModel):
24
  text: str
 
25
 
26
- @app.post("/predict")
27
- async def predict(data: RequestData):
28
- """API endpoint for Telegram bot and Gradio UI."""
29
- try:
30
- response = client.text_generation(prompt=data.text, max_new_tokens=100)
31
- return {"response": response}
32
- except Exception as e:
33
- return {"response": f"Error: {str(e)}"}
 
 
34
 
35
- # 🚀 Telegram Bot Functions
36
- async def start(update: Update, context: CallbackContext):
37
- """Handles /start command."""
38
- await update.message.reply_text("Hello! Send me a message and I'll respond!")
 
39
 
40
- async def handle_message(update: Update, context: CallbackContext):
41
- """Handles user messages and gets response from Hugging Face API."""
42
- user_message = update.message.text
43
- response = requests.post("http://0.0.0.0:8000/predict", json={"text": user_message})
44
 
45
- result = response.json()["response"] if response.status_code == 200 else "Error: Unable to get response from AI."
46
- await update.message.reply_text(result)
47
 
48
- async def run_telegram_bot():
49
- """Runs the Telegram bot asynchronously."""
50
- application = Application.builder().token(TELEGRAM_BOT_TOKEN).build()
51
- application.add_handler(CommandHandler("start", start))
52
- application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))
 
 
 
53
 
54
- # Start polling in the same event loop without trying to close it
55
- await application.run_polling()
56
 
57
- async def main():
58
- """Runs FastAPI and Telegram bot concurrently."""
59
- # Start FastAPI
60
- config = uvicorn.Config(app, host="0.0.0.0", port=8000)
61
- server = uvicorn.Server(config)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
- # Run FastAPI & Telegram bot together
64
- await asyncio.gather(server.serve(), run_telegram_bot())
 
65
 
66
- # 🚀 Start both services
67
  if __name__ == "__main__":
68
- asyncio.run(main())
 
1
+ import gradio as gr
2
+ import os
3
  import requests
4
+ import uvicorn
5
+ from huggingface_hub import InferenceClient
6
  from fastapi import FastAPI
7
  from pydantic import BaseModel
 
 
 
 
 
 
 
 
 
 
 
8
 
9
# Initialize FastAPI
app = FastAPI()

# Hugging Face model client shared by the Gradio UI and the /predict API.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Telegram Bot API configuration.
# SECURITY: the original committed a live bot token into source control.
# That token must be rotated, and the replacement supplied via the
# TELEGRAM_BOT_TOKEN environment variable (never hard-coded).
TELEGRAM_BOT_TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN", "")
TELEGRAM_API_URL = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendMessage"
18
 
19
# Define request structure for Telegram API
class Message(BaseModel):
    """Request payload for the POST /predict endpoint."""
    # Text of the user's message to run through the model.
    text: str
    # Telegram chat id the generated reply is sent back to.
    chat_id: int
23
 
24
# Function to generate chatbot responses
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, yielding the growing reply.

    Args:
        message: The latest user message.
        history: Prior (user, assistant) turn pairs from the Gradio chat.
        system_message: System prompt prepended to the conversation.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.

    Yields:
        The accumulated response text after each streamed token, so the
        Gradio UI can render the reply incrementally.
    """
    messages = [{"role": "system", "content": system_message}]

    # Replay the visible chat history so the model sees the full context.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # FIX: the original loop variable was named `message`, shadowing the
    # user-message parameter above. It is renamed to `chunk` here.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # FIX: streamed deltas can carry no content (e.g. role-only
        # chunks), in which case `content` is None and the original
        # `response += token` raised TypeError. Guard before appending.
        if token:
            response += token
            yield response
56
 
57
# Gradio chat UI backed by `respond`; the extra inputs expose the system
# prompt and the sampling knobs to the user.
_DEFAULT_SYSTEM_PROMPT = (
    "You are a helpful Assistant. You let the user to know if the "
    "information they share with you is true or false. You return the "
    "actual facts behind the information given, after stating the state "
    "of Truth or False."
)

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value=_DEFAULT_SYSTEM_PROMPT, label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

# Serve the Gradio UI at the application root.
app = gr.mount_gradio_app(app, demo, path="/")
76
+
77
@app.post("/predict")
def predict(data: Message):
    """Handle a Telegram message: generate a reply and push it back.

    Runs a single (non-streaming) chat completion on ``data.text``,
    relays the reply to the originating chat via the Telegram Bot API,
    and also returns it in the HTTP response body.

    Args:
        data: Parsed request payload (message text + Telegram chat id).

    Returns:
        ``{"response": <bot reply text>}``.
    """
    messages = [{"role": "user", "content": data.text}]
    response = client.chat_completion(messages, max_tokens=512, temperature=0.7)
    bot_reply = response.choices[0].message.content

    # Send response back to Telegram user.
    # FIX: the original call had no timeout, so a stalled Telegram API
    # connection could hang this worker indefinitely.
    requests.post(
        TELEGRAM_API_URL,
        json={"chat_id": data.chat_id, "text": bot_reply},
        timeout=10,
    )
    return {"response": bot_reply}
87
 
 
88
if __name__ == "__main__":
    # Launch the combined FastAPI + Gradio app on port 7860
    # (presumably chosen to match the Hugging Face Spaces default — confirm).
    uvicorn.run(app, host="0.0.0.0", port=7860)