ericwithpy committed on
Commit
911b23c
·
verified ·
1 Parent(s): 5a6f454

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -0
app.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Pick the GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "tanusrich/Mental_Health_Chatbot"

# Load the causal LM and its tokenizer from the Hugging Face Hub.
model = AutoModelForCausalLM.from_pretrained(model_name)
# BUG FIX: the model must live on the same device as the tokenized inputs —
# generate_response() moves its tensors to `device`, so without this the
# script crashes with a device-mismatch error on any CUDA machine.
model.to(device)
model.eval()  # inference only: disable dropout and other training-mode layers

tokenizer = AutoTokenizer.from_pretrained(model_name)

# One-off snippet for saving a local copy of the weights (kept for reference):
# model_save_path = "./model"
# model.save_pretrained(model_save_path)       # save model
# tokenizer.save_pretrained(model_save_path)   # save tokenizer
18
def generate_response(user_input: str) -> str:
    """Generate one chatbot reply for `user_input`.

    Tokenizes the prompt, samples a continuation from the causal LM,
    and returns only the text after the last "Chatbot:" marker.

    Args:
        user_input: The user's message for this turn.

    Returns:
        The model's reply with special tokens stripped.
    """
    inputs = tokenizer(user_input, return_tensors="pt").to(device)
    with torch.no_grad():  # inference only — no gradients needed
        output = model.generate(
            **inputs,
            max_new_tokens=150,
            temperature=0.7,        # mild randomness
            top_k=50,
            top_p=0.9,              # nucleus sampling
            repetition_penalty=1.2,
            pad_token_id=tokenizer.eos_token_id,
        )
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    # Keep only the text after the last "Chatbot:" marker (the latest reply).
    chatbot_response = response.split("Chatbot:")[-1].strip()
    # BUG FIX: the original did `conversation_history += ...` here, which
    # raised UnboundLocalError on every call (augmented assignment to a name
    # never defined in any scope, and no `global` declaration). The history
    # was never read anywhere, so the broken accumulation is removed.
    return chatbot_response
37
+
38
# Continuous conversation loop: read user turns until an exit command.
while True:
    try:
        user_input = input("You: ")  # take user input
    except (EOFError, KeyboardInterrupt):
        # Ctrl-D / Ctrl-C ends the session cleanly instead of a traceback.
        print("\nChatbot: Goodbye!")
        break

    if user_input.lower() in ("exit", "quit", "stop"):
        print("Chatbot: Goodbye!")
        break
    if not user_input.strip():
        continue  # skip blank lines instead of generating from an empty prompt

    response = generate_response(user_input)
    print("Chatbot:", response)


# Example (single-shot usage, kept for reference):
# user_input = "I'm feeling suicidal."
# response = generate_response(user_input)
# print("Chatbot: ", response)