# CHAT_BOT / CHST.PY
# arslanasghar6637's picture
# Create CHST.PY
# c6b7fe8 verified
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Load pre-trained model and tokenizer.
model_name = "gpt2"  # other sizes: "gpt2-medium", "gpt2-large", etc.
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# GPT-2 ships without a pad token; reuse EOS so padding-aware APIs work.
tokenizer.pad_token = tokenizer.eos_token
# NOTE(review): pad=eos adds no *new* tokens, so this resize is a no-op for
# stock GPT-2; kept so a fine-tuned tokenizer with extra tokens still works.
model.resize_token_embeddings(len(tokenizer))

# Device configuration (use GPU if available).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()  # inference only: disable dropout for deterministic-quality output
# Function to generate AI response
def get_ai_response(user_input):
    """Generate a GPT-2 reply to *user_input* and return it as a string.

    Fixes over the original:
    - passes ``do_sample=True`` — without it, ``generate`` runs greedy search
      and silently ignores ``top_p`` and ``temperature``;
    - passes ``attention_mask`` and ``pad_token_id`` to avoid warnings and
      incorrect masking;
    - strips the echoed prompt tokens so only the model's reply is returned
      (``outputs[0]`` includes the input ids);
    - wraps generation in ``torch.no_grad()`` since no gradients are needed.
    """
    # Tokenize the prompt; the tokenizer call (vs .encode) also yields the mask.
    encoded = tokenizer(user_input + tokenizer.eos_token, return_tensors="pt").to(device)
    input_ids = encoded["input_ids"]

    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            attention_mask=encoded["attention_mask"],
            max_length=150,              # total length incl. prompt; adjust as needed
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,              # required for top_p/temperature to apply
            top_p=0.95,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens (skip the echoed prompt).
    reply_ids = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(reply_ids, skip_special_tokens=True)
# Main chatbot loop
# Main chatbot loop: read user text, reply until 'quit' / Ctrl-C / EOF.
print("AI Chatbot is ready to talk! Type 'quit' to end the conversation.")
while True:
    try:
        user_input = input("You: ")
    except (EOFError, KeyboardInterrupt):
        # Exit cleanly instead of dumping a traceback on Ctrl-C / Ctrl-D.
        print("\nAI Chatbot: Goodbye!")
        break

    # Exit condition
    if user_input.lower() == 'quit':
        print("AI Chatbot: Goodbye!")
        break

    # Skip blank lines rather than generating from an empty prompt.
    if not user_input.strip():
        continue

    # Get AI response
    ai_response = get_ai_response(user_input)
    print(f"AI Chatbot: {ai_response}")