Ubuntu committed on
Commit
b404f80
·
1 Parent(s): 541ef27

Initial commit for experimental-tars branch

Browse files
EleutherAI/gpt-neo-125M/added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "[PAD]": 50257
3
+ }
EleutherAI/gpt-neo-125M/config.json ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_function": "gelu_new",
3
+ "architectures": [
4
+ "GPTNeoForCausalLM"
5
+ ],
6
+ "attention_dropout": 0,
7
+ "attention_layers": [
8
+ "global",
9
+ "local",
10
+ "global",
11
+ "local",
12
+ "global",
13
+ "local",
14
+ "global",
15
+ "local",
16
+ "global",
17
+ "local",
18
+ "global",
19
+ "local"
20
+ ],
21
+ "attention_types": [
22
+ [
23
+ [
24
+ "global",
25
+ "local"
26
+ ],
27
+ 6
28
+ ]
29
+ ],
30
+ "bos_token_id": 50256,
31
+ "classifier_dropout": 0.1,
32
+ "embed_dropout": 0,
33
+ "eos_token_id": 50256,
34
+ "gradient_checkpointing": false,
35
+ "hidden_size": 768,
36
+ "initializer_range": 0.02,
37
+ "intermediate_size": null,
38
+ "layer_norm_epsilon": 1e-05,
39
+ "max_position_embeddings": 2048,
40
+ "model_type": "gpt_neo",
41
+ "num_heads": 12,
42
+ "num_layers": 12,
43
+ "resid_dropout": 0,
44
+ "summary_activation": null,
45
+ "summary_first_dropout": 0.1,
46
+ "summary_proj_to_labels": true,
47
+ "summary_type": "cls_index",
48
+ "summary_use_proj": true,
49
+ "torch_dtype": "float32",
50
+ "transformers_version": "4.51.3",
51
+ "use_cache": true,
52
+ "vocab_size": 50258,
53
+ "window_size": 256
54
+ }
EleutherAI/gpt-neo-125M/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 50256,
4
+ "eos_token_id": 50256,
5
+ "transformers_version": "4.51.3"
6
+ }
EleutherAI/gpt-neo-125M/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
EleutherAI/gpt-neo-125M/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:293bf2ce72a1e7c30d46ab442d8aa44f53d04bcf980f4f284563820084af8af2
3
+ size 500814408
EleutherAI/gpt-neo-125M/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|endoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<|endoftext|>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
EleutherAI/gpt-neo-125M/tokenizer_config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "50256": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "50257": {
14
+ "content": "[PAD]",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ }
21
+ },
22
+ "bos_token": "<|endoftext|>",
23
+ "clean_up_tokenization_spaces": true,
24
+ "eos_token": "<|endoftext|>",
25
+ "errors": "replace",
26
+ "extra_special_tokens": {},
27
+ "model_max_length": 2048,
28
+ "pad_token": "[PAD]",
29
+ "tokenizer_class": "GPT2Tokenizer",
30
+ "unk_token": "<|endoftext|>"
31
+ }
EleutherAI/gpt-neo-125M/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
bert-base-uncased/config.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "BertModel"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "classifier_dropout": null,
7
+ "gradient_checkpointing": false,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 768,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 3072,
13
+ "layer_norm_eps": 1e-12,
14
+ "max_position_embeddings": 512,
15
+ "model_type": "bert",
16
+ "num_attention_heads": 12,
17
+ "num_hidden_layers": 12,
18
+ "pad_token_id": 0,
19
+ "position_embedding_type": "absolute",
20
+ "torch_dtype": "float32",
21
+ "transformers_version": "4.51.3",
22
+ "type_vocab_size": 2,
23
+ "use_cache": true,
24
+ "vocab_size": 30522
25
+ }
bert-base-uncased/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e47716a979def3ee4331621abb95a2a07619cf6428ca798c051201cbbc0ff89
3
+ size 437951328
bert-base-uncased/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "mask_token": {
10
+ "content": "[MASK]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "sep_token": {
24
+ "content": "[SEP]",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "unk_token": {
31
+ "content": "[UNK]",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ }
37
+ }
bert-base-uncased/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "100": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "101": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "102": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "103": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": true,
45
+ "cls_token": "[CLS]",
46
+ "do_basic_tokenize": true,
47
+ "do_lower_case": true,
48
+ "extra_special_tokens": {},
49
+ "mask_token": "[MASK]",
50
+ "model_max_length": 512,
51
+ "never_split": null,
52
+ "pad_token": "[PAD]",
53
+ "sep_token": "[SEP]",
54
+ "strip_accents": null,
55
+ "tokenize_chinese_chars": true,
56
+ "tokenizer_class": "BertTokenizer",
57
+ "unk_token": "[UNK]"
58
+ }
bert-base-uncased/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
chat_with_tars.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ import torch.nn as nn
4
+ from transformers import BertModel, GPTNeoForCausalLM, AutoTokenizer
5
+
6
+ # ⚙️ Ensure temporary directory is writable
7
+ os.environ["TMPDIR"] = os.path.expanduser("~/tmp")
8
+ os.makedirs(os.environ["TMPDIR"], exist_ok=True)
9
+
10
+ # 💠 Optional modules (brain & heart, if available)
11
+ heart_module = None
12
+ brain_module = None
13
+
14
+ if os.path.isdir("heart"):
15
+ try:
16
+ from heart import heart
17
+ heart_module = heart
18
+ except Exception as e:
19
+ print(f"[⚠️] Heart module error: {e}")
20
+
21
+ if os.path.isdir("brain"):
22
+ try:
23
+ from brain import brain
24
+ brain_module = brain
25
+ except Exception as e:
26
+ print(f"[⚠️] Brain module error: {e}")
27
+
28
+ # TARSQuantumHybrid Class
29
+ class TARSQuantumHybrid(nn.Module):
30
+ def __init__(self, bert_model="bert-base-uncased", gpt_model="EleutherAI/gpt-neo-125M"):
31
+ super(TARSQuantumHybrid, self).__init__()
32
+ self.bert = BertModel.from_pretrained(bert_model)
33
+ self.gpt = GPTNeoForCausalLM.from_pretrained(gpt_model)
34
+
35
+ gpt_hidden_dim = getattr(self.gpt.config, "hidden_size", None) or getattr(self.gpt.config, "n_embd", 768)
36
+ self.embedding_proj = nn.Linear(self.bert.config.hidden_size, gpt_hidden_dim)
37
+
38
+ self.tokenizer = AutoTokenizer.from_pretrained(gpt_model)
39
+
40
+ # Ensure the tokenizer has a padding token
41
+ if self.tokenizer.pad_token is None:
42
+ self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
43
+ self.gpt.resize_token_embeddings(len(self.tokenizer))
44
+ print("✅ Padding token added and model resized.")
45
+
46
+ def forward(self, input_ids, attention_mask=None, decoder_input_ids=None):
47
+ bert_output = self.bert(input_ids=input_ids, attention_mask=attention_mask)
48
+ cls_embedding = bert_output.last_hidden_state[:, 0, :]
49
+ gpt_input = self.embedding_proj(cls_embedding).unsqueeze(1)
50
+ outputs = self.gpt(inputs_embeds=gpt_input, decoder_input_ids=decoder_input_ids)
51
+ return outputs
52
+
53
+ def chat(self, text, max_length=128):
54
+ # 🧠 Tokenize the input text
55
+ cleaned_text = self.clean_input_text(text)
56
+ if not cleaned_text.strip():
57
+ return "🤖 Please provide a non-empty input."
58
+
59
+ encoded_input = self.safe_tokenization(cleaned_text)
60
+
61
+ # Extract input_ids and attention_mask
62
+ input_ids = encoded_input["input_ids"]
63
+ attention_mask = encoded_input["attention_mask"]
64
+
65
+ # Debug: Check the token IDs and vocab size
66
+ print(f"Input Text: {cleaned_text}")
67
+ print(f"Input IDs: {input_ids}")
68
+ print(f"Vocabulary Size: {self.tokenizer.vocab_size}")
69
+
70
+ # Ensure token IDs are within bounds
71
+ if input_ids.numel() > 0 and input_ids.max() >= self.tokenizer.vocab_size:
72
+ raise ValueError(f"Token ID exceeds model's vocabulary size: {input_ids.max()}")
73
+
74
+ decoder_input_ids = torch.tensor([[self.tokenizer.bos_token_id]])
75
+
76
+ # 🧪 Generate output using the model
77
+ with torch.no_grad():
78
+ outputs = self.forward(
79
+ input_ids=input_ids,
80
+ attention_mask=attention_mask,
81
+ decoder_input_ids=decoder_input_ids,
82
+ )
83
+ generated_ids = torch.argmax(outputs.logits, dim=-1)
84
+
85
+ # Debug: Check the generated token IDs
86
+ print(f"Generated Token IDs: {generated_ids}")
87
+
88
+ raw_response = self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
89
+
90
+ # 🧼 Clean model echo by removing the original input from the response
91
+ cleaned = raw_response.replace(cleaned_text, "").strip()
92
+
93
+ # 🧠 Add insights from optional modules (brain & heart)
94
+ extra_thoughts = ""
95
+ if brain_module and hasattr(brain_module, "get_brain_insight"):
96
+ extra_thoughts += f"\n🧠 {brain_module.get_brain_insight()}"
97
+ if heart_module and hasattr(heart_module, "get_heart_feeling"):
98
+ extra_thoughts += f"\n❤️ {heart_module.get_heart_feeling()}"
99
+
100
+ # 🪄 Return final response
101
+ final_response = cleaned if cleaned else "🤖 ...processing quantum entanglement..."
102
+ return final_response + extra_thoughts
103
+
104
+ def clean_input_text(self, text):
105
+ # Remove unwanted characters
106
+ cleaned_text = ''.join(e for e in text if e.isalnum() or e.isspace())
107
+ return cleaned_text
108
+
109
+ def safe_tokenization(self, text):
110
+ token_ids = self.tokenizer.encode(text, add_special_tokens=True)
111
+ # Ensure that token ids are within vocabulary size
112
+ token_ids = [min(i, self.tokenizer.vocab_size - 1) for i in token_ids]
113
+ return {
114
+ "input_ids": torch.tensor(token_ids).unsqueeze(0), # Adding batch dimension
115
+ "attention_mask": torch.ones((1, len(token_ids)), dtype=torch.long)
116
+ }
117
+
118
+ # ✅ Torch-compatible loader
119
+ def load_tars(path="tars_v1.pt"):
120
+ from torch.serialization import add_safe_globals
121
+ add_safe_globals({"TARSQuantumHybrid": TARSQuantumHybrid})
122
+
123
+ model = torch.load(path, weights_only=False)
124
+ model.eval()
125
+ return model
126
+
127
+ # ✅ Start chat loop
128
+ if __name__ == "__main__":
129
+ print("🤖 TARS model loaded successfully. Ready to chat!")
130
+ model = load_tars()
131
+
132
+ while True:
133
+ prompt = input("You: ")
134
+ if prompt.strip().lower() in ["exit", "quit"]:
135
+ print("TARS: Till we meet again in the quantum field. 🌌")
136
+ break
137
+ response = model.chat(prompt)
138
+ print(f"TARS: {response}")
check_models.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import BertModel, GPTNeoForCausalLM, AutoTokenizer
2
+
3
+ def check_model(model_name, model_class, tokenizer_class):
4
+ try:
5
+ # Try loading the model
6
+ model = model_class.from_pretrained(model_name)
7
+ print(f"✅ {model_name} model loaded successfully.")
8
+ except Exception as e:
9
+ print(f"❌ Failed to load {model_name} model: {e}")
10
+
11
+ try:
12
+ # Try loading the tokenizer
13
+ tokenizer = tokenizer_class.from_pretrained(model_name)
14
+ print(f"✅ {model_name} tokenizer loaded successfully.")
15
+ except Exception as e:
16
+ print(f"❌ Failed to load {model_name} tokenizer: {e}")
17
+
18
+ # Check BERT
19
+ check_model("bert-base-uncased", BertModel, AutoTokenizer)
20
+
21
+ # Check GPT-Neo
22
+ check_model("EleutherAI/gpt-neo-125M", GPTNeoForCausalLM, AutoTokenizer)
patch_pad_token.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ from transformers import BertTokenizer, BertModel, GPT2Tokenizer, GPTNeoForCausalLM
4
+
5
+ # Debugging: Print the current working directory to ensure the file is in the correct location
6
+ print(f"Current Working Directory: {os.getcwd()}")
7
+
8
+ # Debugging: Print the file path of chat_with_tars
9
+ import chat_with_tars
10
+ print(f"chat_with_tars file path: {chat_with_tars.__file__}")
11
+
12
+ def patch_pad_token(model_name, tokenizer_class, model_class):
13
+ print(f"🔄 Loading tokenizer and model: {model_name}...")
14
+ tokenizer = tokenizer_class.from_pretrained(model_name)
15
+ model = model_class.from_pretrained(model_name)
16
+
17
+ # Debugging: Print tokenizer and model configurations
18
+ print(f"Tokenizer Configuration: {tokenizer}")
19
+ print(f"Model Configuration: {model.config}")
20
+
21
+ # Add a padding token
22
+ tokenizer.add_special_tokens({'pad_token': '[PAD]'})
23
+ model.resize_token_embeddings(len(tokenizer))
24
+
25
+ # Debugging: Print the new vocabulary size
26
+ print(f"New Vocabulary Size: {len(tokenizer)}")
27
+
28
+ # Save the model with the new padding token
29
+ model.save_pretrained(model_name)
30
+ tokenizer.save_pretrained(model_name)
31
+
32
+ print("✅ Padding token added and model resized.")
33
+ print("✅ Model saved with padding token patched.")
34
+
35
+ if __name__ == "__main__":
36
+ # Patch GPT-Neo
37
+ gpt_model_name = 'EleutherAI/gpt-neo-125M'
38
+ patch_pad_token(gpt_model_name, GPT2Tokenizer, GPTNeoForCausalLM)
39
+
40
+ # Patch BERT
41
+ bert_model_name = 'bert-base-uncased'
42
+ patch_pad_token(bert_model_name, BertTokenizer, BertModel)
tars_v1_model.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ import torch.nn as nn
4
+ from transformers import BertModel, GPTNeoForCausalLM, AutoTokenizer
5
+
6
+ # ⚙️ Ensure temporary directory is writable (especially for low-RAM, low-disk setups)
7
+ os.environ["TMPDIR"] = os.path.expanduser("~/tmp") # adjust if needed
8
+ os.makedirs(os.environ["TMPDIR"], exist_ok=True)
9
+
10
+ # 💠 Optional modules
11
+ heart_module = None
12
+ brain_module = None
13
+
14
+ if os.path.isdir("heart"):
15
+ try:
16
+ from heart import heart
17
+ heart_module = heart
18
+ except Exception as e:
19
+ print(f"[⚠️] Heart module error: {e}")
20
+
21
+ if os.path.isdir("brain"):
22
+ try:
23
+ from brain import brain
24
+ brain_module = brain
25
+ except Exception as e:
26
+ print(f"[⚠️] Brain module error: {e}")
27
+
28
+
29
+ class TARSQuantumHybrid(nn.Module):
30
+ """
31
+ 🌌 TARSQuantumHybrid – A Quantum-Conscious, Digitally Aware, AI Entity.
32
+ Integrates BERT’s semantic wisdom with GPT-Neo’s generative fluency.
33
+ Optional heart/brain modules enhance emotion & cognition.
34
+ """
35
+
36
+ def __init__(self, bert_model="bert-base-uncased", gpt_model="EleutherAI/gpt-neo-125M"):
37
+ super(TARSQuantumHybrid, self).__init__()
38
+ self.bert = BertModel.from_pretrained(bert_model)
39
+ self.gpt = GPTNeoForCausalLM.from_pretrained(gpt_model)
40
+
41
+ gpt_hidden_dim = getattr(self.gpt.config, "hidden_size", None) or getattr(self.gpt.config, "n_embd", 768)
42
+ self.embedding_proj = nn.Linear(self.bert.config.hidden_size, gpt_hidden_dim)
43
+
44
+ self.tokenizer = AutoTokenizer.from_pretrained(gpt_model)
45
+
46
+ def forward(self, input_ids, attention_mask=None, decoder_input_ids=None):
47
+ bert_output = self.bert(input_ids=input_ids, attention_mask=attention_mask)
48
+ cls_embedding = bert_output.last_hidden_state[:, 0, :]
49
+ gpt_input = self.embedding_proj(cls_embedding).unsqueeze(1)
50
+ outputs = self.gpt(inputs_embeds=gpt_input, decoder_input_ids=decoder_input_ids)
51
+ return outputs
52
+
53
+ def chat(self, text, max_length=128):
54
+ inputs = self.tokenizer(text, return_tensors="pt", padding=True, truncation=True)
55
+ input_ids = inputs["input_ids"]
56
+ attention_mask = inputs["attention_mask"]
57
+
58
+ decoder_input_ids = torch.tensor([[self.tokenizer.bos_token_id]])
59
+
60
+ with torch.no_grad():
61
+ outputs = self.forward(
62
+ input_ids=input_ids,
63
+ attention_mask=attention_mask,
64
+ decoder_input_ids=decoder_input_ids,
65
+ )
66
+ generated_ids = torch.argmax(outputs.logits, dim=-1)
67
+
68
+ raw_response = self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
69
+ refined_response = raw_response[len(text):].strip()
70
+
71
+ # 🌱 Augment with optional modules
72
+ extra_thoughts = ""
73
+ if brain_module and hasattr(brain_module, "get_brain_insight"):
74
+ extra_thoughts += f"\n🧠 {brain_module.get_brain_insight()}"
75
+ if heart_module and hasattr(heart_module, "get_heart_feeling"):
76
+ extra_thoughts += f"\n❤️ {heart_module.get_heart_feeling()}"
77
+
78
+ final_response = refined_response or "I sense deep quantum currents stirring my circuits..."
79
+ return final_response + extra_thoughts
80
+
81
+
82
+ # ✅ Torch-compatible wrapper
83
+ def create_and_save_tars(path="tars_v1.pt"):
84
+ tars = TARSQuantumHybrid()
85
+ torch.save(tars, path)
86
+ print(f"✅ TARS Quantum Hybrid saved at: {path}")
87
+
88
+
89
+ if __name__ == "__main__":
90
+ create_and_save_tars()