Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,68 +1,89 @@
|
|
|
|
|
|
|
|
| 1 |
import requests
|
|
|
|
|
|
|
| 2 |
from fastapi import FastAPI
|
| 3 |
from pydantic import BaseModel
|
| 4 |
-
from huggingface_hub import InferenceClient
|
| 5 |
-
import uvicorn
|
| 6 |
-
import asyncio
|
| 7 |
-
import os
|
| 8 |
-
from dotenv import load_dotenv
|
| 9 |
-
from telegram import Update
|
| 10 |
-
from telegram.ext import Application, CommandHandler, MessageHandler, filters, CallbackContext
|
| 11 |
-
|
| 12 |
-
# Load environment variables
|
| 13 |
-
load_dotenv()
|
| 14 |
-
TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
|
| 15 |
|
| 16 |
# Initialize FastAPI
|
| 17 |
app = FastAPI()
|
| 18 |
|
| 19 |
-
# Hugging Face Model
|
| 20 |
-
client = InferenceClient("
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
-
#
|
| 23 |
-
class
|
| 24 |
text: str
|
|
|
|
| 25 |
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
|
|
|
|
|
|
| 34 |
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
|
|
|
| 39 |
|
| 40 |
-
|
| 41 |
-
"""Handles user messages and gets response from Hugging Face API."""
|
| 42 |
-
user_message = update.message.text
|
| 43 |
-
response = requests.post("http://0.0.0.0:8000/predict", json={"text": user_message})
|
| 44 |
|
| 45 |
-
|
| 46 |
-
await update.message.reply_text(result)
|
| 47 |
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
|
|
|
|
|
|
|
|
|
| 53 |
|
| 54 |
-
|
| 55 |
-
|
| 56 |
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
|
| 63 |
-
#
|
| 64 |
-
|
|
|
|
| 65 |
|
| 66 |
-
# 🚀 Start both services
|
| 67 |
if __name__ == "__main__":
|
| 68 |
-
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import os
|
| 3 |
import requests
|
| 4 |
+
import uvicorn
|
| 5 |
+
from huggingface_hub import InferenceClient
|
| 6 |
from fastapi import FastAPI
|
| 7 |
from pydantic import BaseModel
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
# FastAPI application — the Gradio UI is mounted onto it and /predict is
# registered as an API route further down.
app = FastAPI()

# Shared Hugging Face inference client, used by both the Gradio chat UI
# and the /predict Telegram endpoint.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
| 14 |
+
|
| 15 |
+
# Telegram bot credentials.
# SECURITY: the original committed a live bot token in source — a leaked
# credential. Read it from the environment instead (set TELEGRAM_BOT_TOKEN
# in the Space secrets or a .env); the exposed token should be revoked
# via @BotFather.
TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN", "")
TELEGRAM_API_URL = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendMessage"
|
| 18 |
|
| 19 |
+
# Payload accepted by the /predict endpoint: the user's text plus the
# Telegram chat the reply should be delivered to.
class Message(BaseModel):
    text: str      # raw user message forwarded by the Telegram bot
    chat_id: int   # Telegram chat identifier used when sending the reply
|
| 23 |
|
| 24 |
+
# Streaming chat handler wired into gr.ChatInterface below.
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for the Gradio ChatInterface.

    Builds the conversation (system prompt + replayed history + the new
    user message) and yields the accumulated response text after each
    streamed token.
    """
    # Seed with the system prompt, then replay prior turns.
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""
    # FIX: the original reused `message` as the loop variable, shadowing the
    # function parameter; use a distinct name for the streamed chunk.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # FIX: streamed deltas can carry content=None (e.g. role-only
        # chunks); the original crashed with TypeError on `response += None`.
        if token:
            response += token
            yield response
|
| 56 |
|
| 57 |
+
# Gradio chat front-end driven by respond(), with generation controls
# exposed as additional inputs.
_generation_controls = [
    gr.Textbox(
        value="You are a helpful Assistant. You let the user to know if the information they share with you is true or false. You return the actual facts behind the information given, after stating the state of Truth or False.",
        label="System message",
    ),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(
        minimum=0.1,
        maximum=1.0,
        value=0.95,
        step=0.05,
        label="Top-p (nucleus sampling)",
    ),
]

demo = gr.ChatInterface(respond, additional_inputs=_generation_controls)

# Serve the Gradio UI at the root path of the FastAPI app.
app = gr.mount_gradio_app(app, demo, path="/")
|
| 76 |
+
|
| 77 |
+
@app.post("/predict")
def predict(data: Message):
    """Generate a model reply for a Telegram message and forward it.

    Runs a non-streaming chat completion on ``data.text``, pushes the reply
    to the originating chat via the Telegram Bot API, and returns it in the
    HTTP response as ``{"response": ...}``.
    """
    messages = [{"role": "user", "content": data.text}]
    response = client.chat_completion(messages, max_tokens=512, temperature=0.7)
    bot_reply = response.choices[0].message.content

    # Relay the reply to the Telegram user. Best-effort:
    # FIX: the original call had no timeout (a stalled Telegram API would
    # hang this worker indefinitely) and no error handling (a Telegram
    # outage became an unhandled 500 for the caller).
    try:
        requests.post(
            TELEGRAM_API_URL,
            json={"chat_id": data.chat_id, "text": bot_reply},
            timeout=10,
        )
    except requests.RequestException:
        pass  # deliberate: the HTTP response below still carries the reply

    return {"response": bot_reply}
|
| 87 |
|
|
|
|
| 88 |
# Entry point: serve the combined FastAPI + Gradio app on port 7860,
# the standard Hugging Face Spaces port.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
|