GBhaveshKumar committed on
Commit 68a3079 · verified · 1 Parent(s): 60007af

Upload 2 files

Files changed (2)
  1. chat.py +49 -0
  2. requirements.txt +2 -0
chat.py ADDED
@@ -0,0 +1,49 @@
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
+ from transformers import pipeline
+ import torch
+ import os
+
+ # Resolve the model directory relative to this script
+ base_dir = os.path.dirname(os.path.abspath(__file__))
+ model_path = os.path.join(base_dir, "chatbot")  # folder containing the model files
+ tokenizer = GPT2Tokenizer.from_pretrained(model_path)
+ model = GPT2LMHeadModel.from_pretrained(model_path)
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model.to(device)
+ model.eval()
+
+ # Create the text-generation pipeline
+ generator = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     device=0 if torch.cuda.is_available() else -1
+ )
+
+ # Interactive chat loop
+ def chat(temp=0.5):
+     print(f"\n🤖 Chatbot is ready! (temperature={temp}) — type 'exit' to quit.")
+     context = ""
+     while True:
+         user_input = input("You: ")
+         if user_input.lower() == "exit":
+             break
+         context += f"A: {user_input}\nB:"
+         result = generator(
+             context,
+             max_length=len(tokenizer.encode(context)) + 50,
+             pad_token_id=tokenizer.eos_token_id,
+             do_sample=True,
+             top_k=50,
+             top_p=0.95,
+             temperature=temp
+         )[0]["generated_text"]
+         # Keep only the newly generated text up to the first newline
+         reply = result[len(context):].split("\n")[0].strip()
+         print(f"Bot: {reply}")
+         context += f"{reply}\n"
+
+ # Start chatting
+ if __name__ == "__main__":
+     chat(temp=0.8)
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ transformers
+ torch