import logging
from telegram import Update
from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, filters
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
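
# Assumed dependencies: the script targets python-telegram-bot >= 20 (the async
# ApplicationBuilder API used below), plus transformers and torch, e.g.:
#   pip install python-telegram-bot transformers torch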

# Basic logging so the bot's activity and errors are visible in the console.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO
)

# Load the tokenizer and the causal language model.
model_name = "your-model-name"  # replace with a causal LM checkpoint, e.g. from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Move the model to the GPU if one is available; the inputs are sent to the same device below.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Generate a reply for one user message with the language model.
def generate_response(input_text):
    # Tokenize the message, move the tensors to the model's device, and decode the generated reply.
    inputs = tokenizer(input_text, return_tensors="pt").to(device)
    output = model.generate(**inputs, max_new_tokens=100)
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
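
# Optional sketch (not in the original code): generate() above uses greedy decoding.
# If the replies come out short or repetitive, sampling can be turned on with the
# standard transformers generate() arguments do_sample / temperature / top_p.
# The values below are illustrative, not tuned.
def generate_response_sampled(input_text):
    inputs = tokenizer(input_text, return_tensors="pt").to(device)
    output = model.generate(
        **inputs,
        max_new_tokens=100,
        do_sample=True,    # sample instead of always picking the most likely token
        temperature=0.7,   # lower values make the output more deterministic
        top_p=0.9,         # nucleus sampling: restrict sampling to the top-p probability mass
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)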

# /start command handler.
async def start(update: Update, context):
    await update.message.reply_text('Hello! I am an AI bot. You can chat with me.')

# Text message handler: answer every non-command message with a model-generated reply.
async def echo(update: Update, context):
    user_message = update.message.text
    # Note: generate_response() blocks while the model runs; see the non-blocking sketch below.
    response = generate_response(user_message)
    await update.message.reply_text(response)
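
# Optional sketch (not part of the original bot): generate_response() is a blocking
# call, so it stalls the asyncio event loop while the model is generating. One way
# around that is to run it in a worker thread with asyncio.to_thread (Python 3.9+).
import asyncio

async def echo_nonblocking(update: Update, context):
    user_message = update.message.text
    # Off-load the blocking generation call so other updates keep being processed.
    response = await asyncio.to_thread(generate_response, user_message)
    await update.message.reply_text(response)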

# Build the application, register the handlers, and start polling Telegram for updates.
def main():
    # Telegram bot token (obtained from @BotFather).
    token = "your-bot-token"

    application = ApplicationBuilder().token(token).build()

    # /start command.
    start_handler = CommandHandler('start', start)
    application.add_handler(start_handler)

    # Plain text messages (commands are excluded).
    echo_handler = MessageHandler(filters.TEXT & ~filters.COMMAND, echo)
    application.add_handler(echo_handler)

    # Blocks and keeps polling until the process is stopped.
    application.run_polling()


if __name__ == '__main__':
    main()
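
# Optional sketch (assumption, not in the original): instead of hard-coding the bot
# token in the source, it can be read from an environment variable, e.g.
#
#   import os
#   token = os.environ["TELEGRAM_BOT_TOKEN"]   # hypothetical variable name
#
# with the variable exported before starting the bot:
#   export TELEGRAM_BOT_TOKEN="<token from @BotFather>"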