from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_name = "microsoft/DialoGPT-medium"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
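
# Optional setup sketch (an assumption, not part of the original script): move
# the model to a GPU when one is available. generate_response below keeps its
# inputs on model.device, so this works on CPU-only machines too.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)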


def generate_response(prompt):
    # Append the EOS token so the model treats the prompt as a complete turn.
    inputs = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt").to(model.device)
    with torch.no_grad():  # inference only, no gradients needed
        reply_ids = model.generate(inputs, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens, skipping the echoed prompt.
    reply = tokenizer.decode(reply_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True)
    return reply
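

# A minimal multi-turn sketch, following the conversation-history pattern from
# the DialoGPT model card. generate_response_with_history and chat_history_ids
# are names introduced here for illustration; they are not in the original script.
def generate_response_with_history(prompt, chat_history_ids=None):
    new_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt").to(model.device)
    # Prepend the prior turns so the model conditions on the whole conversation.
    input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)
    with torch.no_grad():
        chat_history_ids = model.generate(input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    reply = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    return reply, chat_history_ids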


if __name__ == "__main__":
    print("Chatbot: Hello! How can I help you today?")
    while True:
        user_input = input("You: ")
        if user_input.lower() in ["exit", "quit", "bye"]:
            print("Chatbot: Goodbye!")
            break
        response = generate_response(user_input)
        print(f"Chatbot: {response}")