kgrabko committed on
Commit
3212438
·
verified ·
1 Parent(s): 3e5be87

Update chatbot_1b.py

Browse files
Files changed (1) hide show
  1. chatbot_1b.py +186 -164
chatbot_1b.py CHANGED
@@ -1,165 +1,187 @@
1
- # Copyright (c) 2025 CMS Manhattan
2
- # All rights reserved.
3
- #
4
- # This file is part of a project authored by CMS Manhattan.
5
- # You may use, distribute, and modify this code under the terms of the Apache 2.0 license.
6
-
7
- import torch
8
- import torch.nn.functional as F
9
- from transformers import GPT2TokenizerFast
10
- from gpt_modern_8b import JiRackPyTorch # Same import used in fine-tuning
11
- from pathlib import Path
12
-
13
- # ============================= GENERATION SETTINGS =============================
14
- # Temperature: Lower = more focused, conservative, and predictable responses
15
- # Start with 0.7. Increase to 0.8–0.9 if the model starts repeating itself
16
- TEMPERATURE = 0.7
17
-
18
- # Top-K: Limits sampling to the K most likely next tokens
19
- # Start with 50. Increase if output feels too safe/boring
20
- TOP_K = 50
21
-
22
- # Max Length: Maximum number of new tokens to generate per response
23
- MAX_LENGTH = 120
24
-
25
- # ============================= PATHS =============================
26
- LAST_TRAINED_PATH = Path("build/fine_tuning_output/epoch2/gpt_finetuned.pt")
27
- FINAL_OUTPUT_DIR = Path("build/fine_tuning_output/epoch2") # Folder containing the .pt
28
- MODEL_SAVE_NAME = "gpt_finetuned.pt"
29
-
30
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
31
- print(f"Using device: {device}")
32
-
33
- # ============================= CHATBOT CLASS =============================
34
- class Chatbot:
35
- def __init__(self, model_path: Path):
36
- # 1. Load tokenizer (offline-safe recommended see note below)
37
- print("Loading standard GPT-2 tokenizer...")
38
- # For full offline use, replace "gpt2" with "./tokenizers/gpt2" after first download
39
- self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
40
- self.tokenizer.pad_token = self.tokenizer.eos_token
41
-
42
- # 2. Initialize model architecture
43
- print("Initializing JiRackPyTorch model...")
44
- self.model = JiRackPyTorch().to(device)
45
- self.model.eval()
46
-
47
- # 3. Load latest trained weights
48
- load_path = None
49
- candidate1 = FINAL_OUTPUT_DIR / MODEL_SAVE_NAME
50
- candidate2 = model_path if model_path.is_file() else None
51
-
52
- if candidate1.exists():
53
- load_path = candidate1
54
- print(f"Found weights in final folder: {load_path}")
55
- elif candidate2 and candidate2.exists():
56
- load_path = candidate2
57
- print(f"Loading weights from: {load_path}")
58
- else:
59
- print("Warning: No trained weights found. Running with randomly initialized model.")
60
-
61
- if load_path:
62
- print(f"Loading state dict from {load_path}...")
63
- self.model.load_state_dict(torch.load(load_path, map_location=device))
64
- print("Weights loaded successfully!")
65
-
66
- print(f"Model is now running on {device} — ready for chat!\n")
67
-
68
- def generate_response(self, prompt: str, max_length: int = MAX_LENGTH,
69
- temperature: float = TEMPERATURE, top_k: int = TOP_K) -> str:
70
- input_ids = self.tokenizer.encode(prompt, return_tensors="pt").to(device)
71
-
72
- with torch.no_grad():
73
- for _ in range(max_length):
74
- # Forward pass
75
- logits, _ = self.model(input_ids) # JiRackPyTorch returns (logits, past_kv)
76
-
77
- # Get logits for the last generated token
78
- next_token_logits = logits[:, -1, :]
79
-
80
- # Apply temperature
81
- if temperature != 1.0:
82
- next_token_logits = next_token_logits / temperature
83
-
84
- # Apply Top-K sampling
85
- if top_k > 0:
86
- values, indices = torch.topk(next_token_logits, top_k)
87
- next_token_logits = torch.full_like(next_token_logits, float('-inf'))
88
- next_token_logits.scatter_(1, indices, values)
89
-
90
- # Sample next token
91
- probabilities = F.softmax(next_token_logits, dim=-1)
92
- next_token = torch.multinomial(probabilities, num_samples=1)
93
-
94
- # Append to sequence
95
- input_ids = torch.cat([input_ids, next_token], dim=-1)
96
-
97
- # Early stop on EOS or custom end-of-utterance token
98
- token_str = self.tokenizer.decode(next_token.item())
99
- if "__eou__" in token_str or next_token.item() == self.tokenizer.eos_token_id:
100
- break
101
-
102
- # Decode full output and strip prompt
103
- full_output = self.tokenizer.decode(input_ids[0], skip_special_tokens=False)
104
- response = full_output[len(prompt):].strip()
105
-
106
- # Clean up any leftover markers
107
- response = response.replace("__eou__", "").strip()
108
-
109
- return response
110
-
111
-
112
- # ============================= MAIN CHAT LOOP =============================
113
- def main():
114
- global TEMPERATURE, TOP_K
115
-
116
- print("Starting JiRack Chatbot...")
117
- chatbot = Chatbot(LAST_TRAINED_PATH)
118
-
119
- print("\n" + "=" * 70)
120
- print(f"JIRACK CHATBOT ONLINE")
121
- print(f"Temperature: {TEMPERATURE} | Top-K: {TOP_K} | Max Length: {MAX_LENGTH}")
122
- print("Type 'quit' or 'exit' to exit")
123
- print("Change settings: set temp=0.8 or set k=80")
124
- print("=" * 70 + "\n")
125
-
126
- while True:
127
- try:
128
- user_input = input("You: ").strip()
129
-
130
- if user_input.lower() in {"quit", "exit", "bye"}:
131
- print("Goodbye!")
132
- break
133
-
134
- # Live parameter tuning
135
- if user_input.lower().startswith("set temp="):
136
- try:
137
- TEMPERATURE = float(user_input.split("=")[1])
138
- print(f"Temperature {TEMPERATURE}")
139
- except:
140
- print("Invalid format. Use: set temp=0.7")
141
- continue
142
-
143
- if user_input.lower().startswith("set k="):
144
- try:
145
- TOP_K = int(user_input.split("=")[1])
146
- print(f"Top-K {TOP_K}")
147
- except:
148
- print("Invalid format. Use: set k=50")
149
- continue
150
-
151
- if not user_input:
152
- continue
153
-
154
- print("Generating...", end="\r")
155
- response = chatbot.generate_response(user_input)
156
- print(f"JiRack: {response}\n")
157
-
158
- except KeyboardInterrupt:
159
- print("\n\nShutting down...")
160
- break
161
- except Exception as e:
162
- print(f"Error: {e}")
163
-
164
- if __name__ == "__main__":
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
  main()
 
1
+ # Copyright (c) 2025 CMS Manhattan
2
+ # All rights reserved.
3
+ # Author: Konstantin Vladimirovich Grabko
4
+ # Email: grabko@cmsmanhattan.com
5
+ # Phone: +1(516)777-0945
6
+ #
7
+ # MIT License
8
+ #
9
+ # Copyright (c) 2025 Konstantin Grabko
10
+ #
11
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
12
+ # of this software and associated documentation files (the "Software"), to deal
13
+ # in the Software without restriction, including without limitation the rights
14
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15
+ # copies of the Software, and to permit persons to whom the Software is
16
+ # furnished to do so, subject to the following conditions:
17
+ #
18
+ # The above copyright notice and this permission notice shall be included in all
19
+ # copies or substantial portions of the Software.
20
+ #
21
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27
+ # SOFTWARE.
28
+
29
+ import torch
30
+ import torch.nn.functional as F
31
+ from transformers import GPT2TokenizerFast
32
+ from gpt_modern_8b import JiRackPyTorch # Same import used in fine-tuning
33
+ from pathlib import Path
34
+
35
# ============================= GENERATION SETTINGS =============================
# Temperature: Lower = more focused, conservative, and predictable responses
# Start with 0.7. Increase to 0.8–0.9 if the model starts repeating itself
TEMPERATURE = 0.7

# Top-K: Limits sampling to the K most likely next tokens
# Start with 50. Increase if output feels too safe/boring
TOP_K = 50

# Max Length: Maximum number of new tokens to generate per response
MAX_LENGTH = 120

# ============================= PATHS =============================
# Explicit checkpoint path handed to Chatbot(); used as a fallback when the
# final output folder below does not contain the weights file.
LAST_TRAINED_PATH = Path("build/fine_tuning_output/epoch2/gpt_finetuned.pt")
FINAL_OUTPUT_DIR = Path("build/fine_tuning_output/epoch2") # Folder containing the .pt
MODEL_SAVE_NAME = "gpt_finetuned.pt"

# Module-level device: model weights and input tensors are both moved here.
# Prefers CUDA when available, otherwise falls back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
54
+
55
# ============================= CHATBOT CLASS =============================
class Chatbot:
    """Interactive wrapper around a fine-tuned JiRackPyTorch language model.

    Loads the standard GPT-2 tokenizer and the most recent fine-tuned
    weights, then produces replies with temperature / top-k sampling.
    """

    def __init__(self, model_path: Path):
        """Build tokenizer + model and load the newest available weights.

        Args:
            model_path: Fallback checkpoint path, used only when the final
                output folder does not contain the weights file.
        """
        # 1. Load tokenizer (offline-safe recommended — see note below)
        print("Loading standard GPT-2 tokenizer...")
        # For full offline use, replace "gpt2" with "./tokenizers/gpt2" after first download
        self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
        # GPT-2 ships without a pad token; reuse EOS so padding-aware code works
        self.tokenizer.pad_token = self.tokenizer.eos_token

        # 2. Initialize model architecture
        print("Initializing JiRackPyTorch model...")
        self.model = JiRackPyTorch().to(device)
        self.model.eval()

        # 3. Load latest trained weights — prefer the final output folder,
        #    then the explicit model_path, else run with random init.
        load_path = None
        candidate1 = FINAL_OUTPUT_DIR / MODEL_SAVE_NAME
        candidate2 = model_path if model_path.is_file() else None

        if candidate1.exists():
            load_path = candidate1
            print(f"Found weights in final folder: {load_path}")
        elif candidate2 and candidate2.exists():
            load_path = candidate2
            print(f"Loading weights from: {load_path}")
        else:
            print("Warning: No trained weights found. Running with randomly initialized model.")

        if load_path:
            print(f"Loading state dict from {load_path}...")
            # NOTE(review): torch.load unpickles arbitrary objects — only load
            # checkpoints you trust. On torch >= 1.13 consider weights_only=True.
            self.model.load_state_dict(torch.load(load_path, map_location=device))
            print("Weights loaded successfully!")

        print(f"Model is now running on {device} — ready for chat!\n")

    def generate_response(self, prompt: str, max_length: int = MAX_LENGTH,
                          temperature: float = TEMPERATURE, top_k: int = TOP_K) -> str:
        """Generate a reply to *prompt* via temperature + top-k sampling.

        Args:
            prompt: Raw user text; tokenized with the GPT-2 tokenizer.
            max_length: Maximum number of NEW tokens to sample.
            temperature: Logit divisor; 1.0 disables scaling.
            top_k: Keep only the k most likely tokens per step; 0 disables.

        Returns:
            The generated continuation only (prompt removed), with the
            "__eou__" end-of-utterance marker stripped.
        """
        input_ids = self.tokenizer.encode(prompt, return_tensors="pt").to(device)
        prompt_token_count = input_ids.shape[1]  # so we can slice off the prompt by tokens

        with torch.no_grad():
            for _ in range(max_length):
                # Forward pass
                logits, _ = self.model(input_ids)  # JiRackPyTorch returns (logits, past_kv)

                # Get logits for the last generated token
                next_token_logits = logits[:, -1, :]

                # Apply temperature
                if temperature != 1.0:
                    next_token_logits = next_token_logits / temperature

                # Apply Top-K sampling. Clamp k to the vocab size: torch.topk
                # raises if k exceeds the size of the last dimension.
                if top_k > 0:
                    k = min(top_k, next_token_logits.size(-1))
                    values, indices = torch.topk(next_token_logits, k)
                    next_token_logits = torch.full_like(next_token_logits, float('-inf'))
                    next_token_logits.scatter_(1, indices, values)

                # Sample next token
                probabilities = F.softmax(next_token_logits, dim=-1)
                next_token = torch.multinomial(probabilities, num_samples=1)

                # Append to sequence
                input_ids = torch.cat([input_ids, next_token], dim=-1)

                # Early stop on EOS or custom end-of-utterance token
                token_str = self.tokenizer.decode(next_token.item())
                if "__eou__" in token_str or next_token.item() == self.tokenizer.eos_token_id:
                    break

        # Decode ONLY the newly generated tokens. The previous version sliced the
        # decoded string with len(prompt), which breaks whenever
        # decode(encode(prompt)) differs from the raw prompt text (tokenizer
        # normalization / whitespace handling) — leaking or truncating characters.
        response = self.tokenizer.decode(
            input_ids[0, prompt_token_count:], skip_special_tokens=False
        ).strip()

        # Clean up any leftover markers
        response = response.replace("__eou__", "").strip()

        return response
132
+
133
+
134
# ============================= MAIN CHAT LOOP =============================
def main():
    """Run the interactive chat REPL with live temperature / top-k tuning."""
    global TEMPERATURE, TOP_K

    print("Starting JiRack Chatbot...")
    chatbot = Chatbot(LAST_TRAINED_PATH)

    print("\n" + "=" * 70)
    print("JIRACK CHATBOT ONLINE")
    print(f"Temperature: {TEMPERATURE} | Top-K: {TOP_K} | Max Length: {MAX_LENGTH}")
    print("Type 'quit' or 'exit' to exit")
    print("Change settings: set temp=0.8 or set k=80")
    print("=" * 70 + "\n")

    while True:
        try:
            user_input = input("You: ").strip()

            if user_input.lower() in {"quit", "exit", "bye"}:
                print("Goodbye!")
                break

            # Live parameter tuning. split("=", 1) keeps any later '=' chars
            # inside the value instead of silently discarding them.
            if user_input.lower().startswith("set temp="):
                try:
                    TEMPERATURE = float(user_input.split("=", 1)[1])
                    print(f"Temperature → {TEMPERATURE}")
                except ValueError:  # only a malformed number — don't mask other bugs
                    print("Invalid format. Use: set temp=0.7")
                continue

            if user_input.lower().startswith("set k="):
                try:
                    TOP_K = int(user_input.split("=", 1)[1])
                    print(f"Top-K → {TOP_K}")
                except ValueError:
                    print("Invalid format. Use: set k=50")
                continue

            if not user_input:
                continue

            print("Generating...", end="\r")
            # Pass the (possibly live-tuned) globals explicitly. Relying on the
            # method's defaults is a bug: default argument values are bound once
            # at function-definition time, so 'set temp=' / 'set k=' would be
            # silently ignored.
            response = chatbot.generate_response(
                user_input, temperature=TEMPERATURE, top_k=TOP_K
            )
            print(f"JiRack: {response}\n")

        except KeyboardInterrupt:
            print("\n\nShutting down...")
            break
        except Exception as e:  # top-level boundary: report and keep the REPL alive
            print(f"Error: {e}")


if __name__ == "__main__":
    main()