Katherine committed
Commit 277731f · 1 Parent(s): 490ec61

chat.py impl

Files changed (2):
  1. config.py +2 -2
  2. src/chat.py +14 -2
config.py CHANGED
@@ -4,8 +4,8 @@ from dotenv import load_dotenv
  # Load from .env file. Store your HF token in the .env file.
  load_dotenv()

-
- BASE_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ BASE_MODEL = "HuggingFaceH4/zephyr-7b-beta"
+ # BASE_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
  # Other options:
  # MODEL = "meta-llama/Llama-2-7b-chat-hf"
  # MODEL = "openlm-research/open_llama_3b"
src/chat.py CHANGED
@@ -37,7 +37,11 @@ class SchoolChatbot:
  User: {user_input}
  Assistant:"
  """
- pass
+ return (
+     "<|system|>You are a helpful assistant that specializes in Boston public school enrollment.<|end|>\n"
+     f"<|user|>{user_input}<|end|>\n"
+     "<|assistant|>"
+ )

  def get_response(self, user_input):
  """
@@ -58,4 +62,12 @@ class SchoolChatbot:
  - Use self.format_prompt() to format the user's input
  - Use self.client to generate responses
  """
- pass
+ prompt = self.format_prompt(user_input)
+ response = self.client.text_generation(
+     prompt,
+     max_new_tokens=512,
+     temperature=0.7,
+     top_p=0.9,
+     stop_sequences=["<|end|>"]
+ )
+ return response.strip()
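
A short usage sketch of the two methods added here, assuming SchoolChatbot() can be constructed with no arguments and that self.client is a huggingface_hub.InferenceClient pointed at BASE_MODEL (neither detail is shown in this diff):

from src.chat import SchoolChatbot

# Hypothetical usage; the no-argument constructor and the src package layout
# are assumptions based on the file path in this commit.
bot = SchoolChatbot()
answer = bot.get_response("How do I enroll my child in a Boston public school?")
print(answer)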