arslanasghar6637 commited on
Commit
c6b7fe8
·
verified ·
1 Parent(s): 976eb18

Create CHST.PY

Browse files
Files changed (1) hide show
  1. CHST.PY +41 -0
CHST.PY ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Load pre-trained model and tokenizer
model_name = "gpt2"  # You can use other models like "gpt2-medium", "gpt2-large", etc.
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# GPT-2 ships without a pad token; reuse EOS so padding-aware calls work.
tokenizer.pad_token = tokenizer.eos_token
model.resize_token_embeddings(len(tokenizer))

# Device configuration (use GPU if available)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# This script only does inference: switch off dropout/training-mode layers.
model.eval()
17
# Function to generate AI response
def get_ai_response(user_input):
    """Generate a reply to *user_input* using the module-level GPT-2 model.

    Returns only the newly generated continuation (the prompt is stripped),
    decoded with special tokens removed.
    """
    # Tokenize input and keep the attention mask so generate() can tell
    # real tokens apart from padding (avoids a transformers warning).
    encoded = tokenizer(user_input + tokenizer.eos_token, return_tensors="pt").to(device)

    # NOTE: top_p/temperature only take effect with do_sample=True — without
    # it generation is greedy and those arguments are silently ignored.
    # pad_token_id is set explicitly to silence the eos-as-pad warning.
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        outputs = model.generate(
            encoded["input_ids"],
            attention_mask=encoded["attention_mask"],
            max_length=150,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            top_p=0.95,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )

    # generate() returns prompt + continuation; slice the prompt off so the
    # chatbot does not echo the user's message back in its answer.
    new_tokens = outputs[0][encoded["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
28
+
29
# Main chatbot loop: read a message, answer with the model, repeat until
# the user types 'quit' (case-insensitive).
print("AI Chatbot is ready to talk! Type 'quit' to end the conversation.")
while True:
    # Prompt for the next user message.
    message = input("You: ")

    # Exit condition
    if message.lower() == 'quit':
        print("AI Chatbot: Goodbye!")
        break

    # Relay the model's reply.
    print(f"AI Chatbot: {get_ai_response(message)}")