arbyazra123
committed on
Commit · 0a62c87 · 1 Parent(s): 53cc8cc
finalize llm
Browse files
- .DS_Store +0 -0
- classification/train_ml.ipynb +15 -51
- dataset_helper.py +36 -7
- llm/train_llm.ipynb +157 -84
- llm/train_llm.py +0 -203
- requirements.txt +11 -9
- slm/train_slm.ipynb +2 -34
- slm/train_slm.py +0 -319
.DS_Store
CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
classification/train_ml.ipynb
CHANGED

@@ -1,37 +1,5 @@
 {
  "cells": [
-  {
-   "cell_type": "markdown",
-   "id": "11d756c9",
-   "metadata": {},
-   "source": [
-    "# Prepare environtment\n",
-    "If you're running offline (e.g. jupyter notebook), skip this step.\n",
-    "\n",
-    "But if online (e.g. Google Colab), need to clone first"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "6d21801f",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!pip install huggingface_hub\n",
-    "\n",
-    "from huggingface_hub import snapshot_download\n",
-    "\n",
-    "repo_id = \"arbyazra123/sumobot_ml\"\n",
-    "\n",
-    "# download the repo into local folder\n",
-    "local_dir = snapshot_download(repo_id=repo_id, repo_type=\"dataset\")\n",
-    "\n",
-    "# add to sys.path so Python can import from it\n",
-    "import sys\n",
-    "sys.path.append(local_dir)"
-   ]
-  },
   {
    "cell_type": "markdown",
    "id": "def9fcb6",
@@ -52,17 +20,13 @@
    "import pandas as pd\n",
    "import glob\n",
    "\n",
-   "…
-   "…
-   "…
-   "…
-   "\n",
-   "…
-   "…
-   "    output_dir = f\"model\"\n",
-   "else:\n",
-   "    dataset_dir = f\"dataset\"\n",
-   "    output_dir = f\"classification/model\""
+   "# Amount of dataset lines that will be compiled and converted to dataset.jsonl. \n",
+   "# If -1, use all lines.\n",
+   "max_dataset=100\n",
+   "# max_dataset=-1\n",
+   "\n",
+   "output_onnx_name = \"ml.onnx\"\n",
+   "output_labels_name = \"ml_labels.json\"\n"
   ]
  },
  {
@@ -101,7 +65,8 @@
   "\n",
   "df, dir = get_dataset(inside_arena=True)\n",
   "\n",
-  "…
+  "if max_dataset>-1:\n",
+  "    df = df.sample(max_dataset)\n",
   "\n",
   "features = [\n",
   "    \"BotPosX\", \n",
@@ -192,19 +157,18 @@
   "onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature=spec, opset=13)\n",
   "\n",
   "# Save to file\n",
-  "…
-  "with open(_outputLoc, \"wb\") as f:\n",
+  "with open(output_onnx_name, \"wb\") as f:\n",
   "    f.write(onnx_model.SerializeToString())\n",
   "\n",
-  "print(f\"Model saved to {…
+  "print(f\"Model saved to {output_onnx_name}\")\n",
   "\n",
   "class_labels = le.classes_.tolist()\n",
   "\n",
   "# Optional: Save labels to JSON\n",
-  "with open(…
+  "with open(output_labels_name, \"w\") as f:\n",
   "    json.dump(class_labels, f)\n",
   "\n",
-  "print(\"Exported label encoder classes to …
+  "print(f\"Exported label encoder classes to {output_labels_name}\")\n",
   "print(class_labels)"
  ]
 },
@@ -228,10 +192,10 @@
   "import joblib\n",
   "\n",
   "# Load ONNX session\n",
-  "session = ort.InferenceSession(…
+  "session = ort.InferenceSession(output_onnx_name)\n",
   "\n",
   "# Load label encoder\n",
-  "le = joblib.load(…
+  "le = joblib.load(output_labels_name)\n",
   "\n",
   "# 1 sample input (can take from X_test or manually)\n",
   "sample = np.array([[\n",
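Note on the test cell above: the labels file is written with json.dump to ml_labels.json but read back with joblib.load, which expects a joblib/pickle file. A minimal loader sketch that matches the export, assuming the ml.onnx and ml_labels.json artifacts produced by the cells above (the feature count below is a placeholder, not from the source):

import json

import numpy as np
import onnxruntime as ort

# Load the exported ONNX model (filename from the notebook's config cell)
session = ort.InferenceSession("ml.onnx")

# Labels were serialized with json.dump, so json.load is the matching reader
with open("ml_labels.json") as f:
    class_labels = json.load(f)

# One dummy sample; replace 9 with the length of the notebook's `features` list
sample = np.zeros((1, 9), dtype=np.float32)
input_name = session.get_inputs()[0].name
logits = session.run(None, {input_name: sample})[0]
print(class_labels[int(np.argmax(logits))])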
dataset_helper.py
CHANGED

@@ -305,13 +305,28 @@ def export_dataset(df, output_path, format="txt", completion_mode="normal", incl
             if format == "txt":
                 line = f"{prompt_str} Result: {', '.join(actions)}"
                 f.write(line + "\n")
-            elif format == "jsonl":
+            elif format == "jsonl_prompt_completion":
                 line = f"You are a Sumobot assistant. Given this state: {prompt_str} Suggested Action:"
                 record = {
                     "prompt": line,
                     "completion": ', '.join(actions)
                 }
                 f.write(json.dumps(record) + "\n")
+            elif format == "jsonl_message":
+                record = {
+                    "messages": [
+                        {"role": "system", "content": "You are a Sumobot assistant that decides actions based on game state."},
+                        {"role": "user", "content": f"Given this game state: {prompt_str}"},
+                        {"role": "assistant", "content": ', '.join(actions)}
+                    ]
+                }
+                f.write(json.dumps(record) + "\n")
+            elif format == "jsonl_text":
+                line = f"You are a Sumobot assistant. Given this state: {prompt_str} Suggested Action: {', '.join(actions)}"
+                record = {
+                    "text": line,
+                }
+                f.write(json.dumps(record) + "\n")


     except Exception as e:
@@ -324,10 +339,17 @@ def filter_inside_arena(df, margin=0.95):

 def get_dataset_dir():
     root_dir = os.getcwd().split("/")[-1]
+
+    # For online notebook
+    if root_dir == "content":
+        path = "dataset"
+        os.makedirs(path, exist_ok=True)
+        return "dataset"
+
     if root_dir=="slm" or root_dir=="llm" or "classification":
         return f"../dataset"
     else:
-        return …
+        return "dataset"

 def get_slm_dir():
     root_dir = os.getcwd().split("/")[-1]
@@ -339,13 +361,14 @@ def get_slm_dir():

 def get_dataset(
     prefer_local: bool = True,
-    inside_arena: bool = False
+    inside_arena: bool = False,
+    save_downloaded_dataset :bool = True
 ):
     local_dataset_path = get_dataset_dir()
     dfs = []

     if not prefer_local: # Use HuggingFace
-        dfs = get_dataset_from_hf()
+        dfs = get_dataset_from_hf(save_downloaded_dataset=save_downloaded_dataset)
     else:
         print(f"Reading local dataset from: {local_dataset_path}")
         csv_files = glob.glob(os.path.join(local_dataset_path, "*.csv"))
@@ -356,7 +379,7 @@ def get_dataset(

     if not dfs:
         print("No dataset found locally, will fetch from HuggingFace")
-        dfs = get_dataset_from_hf()
+        dfs = get_dataset_from_hf(save_downloaded_dataset=save_downloaded_dataset)

     # Merge into one DataFrame
     merged_df = pd.concat(dfs, ignore_index=True)
@@ -368,14 +391,15 @@ def get_dataset(

 def get_dataset_from_hf(
     repo_id: str = "arbyazra123/sumobot_ml",
-    repo_dataset_path: str = "dataset"):
+    repo_dataset_path: str = "dataset",
+    save_downloaded_dataset :bool = True):

     print(f"Fetching dataset from HuggingFace repo: {repo_id}")
     dfs = []

     all_files = list_repo_files(repo_id=repo_id, repo_type="dataset")
     hf_csv_files = [f for f in all_files if f.startswith(repo_dataset_path) and f.endswith(".csv")]

     print(f"Auto-detected {len(hf_csv_files)} CSV files from HuggingFace.")
     print("\n".join(hf_csv_files))
@@ -388,4 +412,9 @@ def get_dataset_from_hf(
         df = pd.read_csv(file_path)
         df["source_file"] = os.path.basename(fname)
         dfs.append(df)
+
+        if save_downloaded_dataset:
+            save_path = os.path.join(get_dataset_dir(), os.path.basename(fname))
+            df.to_csv(save_path, index=False)
+            print(f"Saved: {save_path}")
     return dfs
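For reference, one line of each JSONL format that export_dataset now emits, built the same way as the branches above (the state string and actions here are made-up examples):

import json

prompt_str = "AngleToEnemy=8.11, AngleToEnemyScore=0.99"  # illustrative state string
actions = ["TurnLeft", "Accelerate"]                      # illustrative actions

# format="jsonl_prompt_completion"
print(json.dumps({
    "prompt": f"You are a Sumobot assistant. Given this state: {prompt_str} Suggested Action:",
    "completion": ", ".join(actions),
}))

# format="jsonl_message" (chat-style records consumed by the LLM notebook)
print(json.dumps({
    "messages": [
        {"role": "system", "content": "You are a Sumobot assistant that decides actions based on game state."},
        {"role": "user", "content": f"Given this game state: {prompt_str}"},
        {"role": "assistant", "content": ", ".join(actions)},
    ]
}))

# format="jsonl_text" (single flat text field)
print(json.dumps({
    "text": f"You are a Sumobot assistant. Given this state: {prompt_str} Suggested Action: {', '.join(actions)}",
}))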
llm/train_llm.ipynb
CHANGED

@@ -2,13 +2,11 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "…
+   "id": "ac0a4693",
    "metadata": {},
    "source": [
-    "# Prepare environtment\n",
-    "If you're running offline (e.g. jupyter notebook), skip this step.\n",
-    "\n",
-    "But if online (e.g. Google Colab), need to clone first"
+    "# Requirements\n",
+    "This project (LLM) need to run requirements.txt"
    ]
   },
   {
@@ -18,18 +16,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "!pip install huggingface_hub\n",
-    "\n",
-    "from huggingface_hub import snapshot_download\n",
-    "\n",
-    "repo_id = \"arbyazra123/sumobot_ml\"\n",
-    "\n",
-    "# download the repo into local folder\n",
-    "local_dir = snapshot_download(repo_id=repo_id, repo_type=\"dataset\")\n",
-    "\n",
-    "# add to sys.path so Python can import from it\n",
-    "import sys\n",
-    "sys.path.append(local_dir)"
+    "!pip install -r requirements.txt"
    ]
   },
   {
@@ -52,27 +39,33 @@
    "\n",
    "from dataset_helper import export_dataset, get_dataset, get_dataset_dir\n",
    "\n",
-   "# Amount of dataset lines that will be compiled and converted to dataset.jsonl…
+   "# Amount of dataset lines that will be compiled and converted to dataset.jsonl.\n",
    "# If -1, use all lines.\n",
-   "max_dataset=1_000_000\n",
+   "max_dataset=10_000\n",
    "# max_dataset=-1 # Use all lines\n",
+   "train_validation_ratio=0.9\n",
    "\n",
    "# Training args\n",
-   "batches_per_device=1 # adjust based on GPU CUDA / MPS power. Using standard laptop RAM is suggested to set 1. Example (1,2,4,8)\n",
+   "batches_per_device=4 # adjust based on GPU CUDA / MPS power. Using standard laptop RAM is suggested to set 1. Example (1,2,4,8)\n",
    "# batches_per_device=8\n",
    "num_train_epoch=2 # num train 2-3 is enough\n",
-   "gradient_accumulation=1\n",
-   "learning_rate=1e-5\n",
-   "save_every=10_000\n",
+   "gradient_accumulation=12\n",
+   "eval_accumulation=1\n",
+   "learning_rate=5e-5\n",
+   "save_every=0.5 #ratio\n",
    "log_every=10\n",
+   "eval_ratio=20 #ratio\n",
    "\n",
    "# LoRA\n",
    "rank=32\n",
    "alpha=64\n",
    "dropout=0.01\n",
    "\n",
+   "model_name = \"Qwen/Qwen2.5-0.5B-Instruct\"\n",
+   "\n",
    "adapter_folder_name=\"adapters/qwen2.5_0.5b_lora\"\n",
-   "dataset_output_path = f\"{get_dataset_dir()}/llm_dataset.jsonl\"\n"
+   "dataset_train_output_path = f\"{get_dataset_dir()}/llm_dataset_train.jsonl\"\n",
+   "dataset_val_output_path = f\"{get_dataset_dir()}/llm_dataset_val.jsonl\""
    ]
   },
   {
@@ -90,15 +83,39 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "from sklearn.model_selection import train_test_split\n",
+    "\n",
     "# Load data\n",
     "df, dir = get_dataset()\n",
     "\n",
     "if max_dataset>-1:\n",
     "    df = df.sample(max_dataset)\n",
-    "    \n",
-    "export_dataset(df, dataset_output_path, format=\"jsonl\", completion_mode=\"normal\", include_pos_rot=False)\n",
     "\n",
-    "print(f\"Saved {len(df)} samples to {dataset_output_path}\")"
+    "df_train, df_val = train_test_split(\n",
+    "    df,\n",
+    "    train_size=train_validation_ratio,\n",
+    "    random_state=42,\n",
+    "    shuffle=True\n",
+    ")\n",
+    "\n",
+    "export_dataset(\n",
+    "    df,\n",
+    "    dataset_train_output_path,\n",
+    "    format=\"jsonl_message\",\n",
+    "    completion_mode=\"short\",\n",
+    "    include_pos_rot=False\n",
+    ")\n",
+    "\n",
+    "export_dataset(\n",
+    "    df_val,\n",
+    "    dataset_val_output_path,\n",
+    "    format=\"jsonl_message\",\n",
+    "    completion_mode=\"short\",\n",
+    "    include_pos_rot=False\n",
+    ")\n",
+    "\n",
+    "print(f\"Saved {len(df_train)} samples to {dataset_train_output_path}\")\n",
+    "print(f\"Saved {len(df_val)} samples to {dataset_val_output_path}\")"
    ]
   },
   {
@@ -138,17 +155,13 @@
   },
   "outputs": [],
   "source": [
+   "from functools import partial\n",
    "import json\n",
    "import torch\n",
-   "from transformers import (\n",
-   "    AutoTokenizer,\n",
-   "    AutoModelForCausalLM,\n",
-   "    Trainer,\n",
-   "    TrainingArguments,\n",
-   "    DataCollatorForLanguageModeling\n",
-   ")\n",
+   "from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, DataCollatorForLanguageModeling\n",
    "from datasets import load_dataset\n",
-   "from peft import LoraConfig, get_peft_model, TaskType\n",
+   "from peft import LoraConfig, TaskType\n",
+   "from trl import SFTTrainer\n",
    "\n",
    "# Device detection (MPS/CUDA)\n",
    "if torch.backends.mps.is_available():\n",
@@ -164,9 +177,6 @@
    "print(f\"Using device: {device}\")\n",
    "\n",
    "# Load Qwen2.5-0.5B model & tokenizer\n",
-   "model_name = \"Qwen/Qwen2.5-0.5B\"\n",
-   "\n",
-   "\n",
    "tokenizer = AutoTokenizer.from_pretrained(\n",
    "    model_name,\n",
    "    trust_remote_code=True\n",
@@ -178,7 +188,7 @@
    "\n",
    "model = AutoModelForCausalLM.from_pretrained(\n",
    "    model_name,\n",
-   "    torch_dtype=torch.float16, # safe default\n",
+   "    # torch_dtype=torch.float16, # safe default\n",
    "    device_map=device_map,\n",
    "    trust_remote_code=True\n",
    ")\n",
@@ -187,37 +197,43 @@
    "lora_config = LoraConfig(\n",
    "    r=rank,\n",
    "    lora_alpha=alpha,\n",
-   "    target_modules=[\"q_proj\",\"v_proj\"],\n",
+   "    target_modules=[\"q_proj\", \"v_proj\"],\n",
    "    lora_dropout=dropout,\n",
    "    bias=\"none\",\n",
    "    task_type=TaskType.CAUSAL_LM,\n",
    ")\n",
-   "model = get_peft_model(model, lora_config)\n",
    "\n",
-   "# Load dataset\n",
-   "dataset = load_dataset(\"json\", data_files={\"train\": dataset_output_path})\n",
-   "# dataset = dataset[\"train\"].select(range(10_000))\n",
-   "dataset = dataset[\"train\"]\n",
+   "# Load both train & val splits\n",
+   "dataset = load_dataset(\n",
+   "    \"json\",\n",
+   "    data_files={\n",
+   "        \"train\": dataset_train_output_path,\n",
+   "        \"val\": dataset_val_output_path\n",
+   "    }\n",
+   ")\n",
    "\n",
-   "def tokenize_function(examples):\n",
-   "    # Convert dicts into text strings for training\n",
-   "    merged_texts = []\n",
-   "    for p, c in zip(examples[\"prompt\"], examples[\"completion\"]):\n",
-   "        merged_texts.append(json.dumps({\"prompt\": p, \"completion\": c}))\n",
-   "    return tokenizer(merged_texts, truncation=True, padding=\"max_length\", max_length=256)\n",
+   "def tokenize(example):\n",
+   "    # Convert structured messages → chat text\n",
+   "    text = tokenizer.apply_chat_template(\n",
+   "        example[\"messages\"],\n",
+   "        tokenize=False,\n",
+   "        add_generation_prompt=False # training: we include the assistant text\n",
+   "    )\n",
+   "    # Tokenize (labels = input_ids for causal LM training)\n",
+   "    tokenized = tokenizer(\n",
+   "        text,\n",
+   "        truncation=True,\n",
+   "        padding=\"max_length\",\n",
+   "        max_length=512 # adjust depending on context size\n",
+   "    )\n",
+   "    tokenized[\"labels\"] = tokenized[\"input_ids\"].copy()\n",
+   "    return tokenized\n",
    "\n",
    "\n",
+   "# Tokenize train\n",
    "tokenized_datasets = dataset.map(\n",
-   "    tokenize_function,\n",
-   "    batched=True,\n",
-   "    num_proc=4,\n",
-   "    remove_columns=[\"prompt\", \"completion\"] # remove original cols\n",
-   ")\n",
-   "\n",
-   "# Data collator\n",
-   "data_collator = DataCollatorForLanguageModeling(\n",
-   "    tokenizer=tokenizer,\n",
-   "    mlm=False\n",
+   "    tokenize,\n",
+   "    batched=False\n",
    ")\n",
    "\n",
    "# Training setup\n",
@@ -225,30 +241,39 @@
    "    output_dir=adapter_folder_name,\n",
    "    per_device_train_batch_size=batches_per_device,\n",
    "    gradient_accumulation_steps=gradient_accumulation,\n",
+   "    eval_accumulation_steps=eval_accumulation,\n",
    "    learning_rate=learning_rate,\n",
    "    num_train_epochs=num_train_epoch,\n",
    "    save_strategy=\"steps\",\n",
    "    save_steps=save_every,\n",
    "    logging_strategy=\"steps\",\n",
+   "    eval_strategy=\"steps\",\n",
+   "    eval_steps=eval_ratio,\n",
    "    logging_steps=log_every,\n",
    "    report_to=\"none\",\n",
    "    fp16=torch.cuda.is_available(),\n",
    ")\n",
    "\n",
+   "data_collator = DataCollatorForLanguageModeling(\n",
+   "    tokenizer=tokenizer,\n",
+   "    mlm=False # we want causal LM training\n",
+   ")\n",
+   "\n",
    "# Trainer\n",
-   "trainer = Trainer(\n",
+   "trainer = SFTTrainer(\n",
    "    model=model,\n",
+   "    train_dataset=tokenized_datasets[\"train\"],\n",
+   "    eval_dataset=tokenized_datasets[\"val\"],\n",
+   "    peft_config=lora_config,\n",
    "    args=training_args,\n",
-   "    train_dataset=tokenized_datasets,\n",
-   "    tokenizer=tokenizer,\n",
-   "    data_collator=data_collator,\n",
+   "    data_collator=data_collator\n",
    ")\n",
    "\n",
-   "# Train…
+   "# Train\n",
    "trainer.train()\n",
    "\n",
    "# Save LoRA adapter + tokenizer\n",
-   "model.save_pretrained(adapter_folder_name)\n",
+   "trainer.model.save_pretrained(adapter_folder_name)\n",
    "tokenizer.save_pretrained(adapter_folder_name)\n"
    ]
   },
@@ -270,36 +295,84 @@
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "from peft import PeftModel\n",
    "\n",
-   "base_model = \"Qwen/Qwen2.5-0.5B\" \n",
-   "\n",
    "# Load tokenizer\n",
-   "tokenizer = AutoTokenizer.from_pretrained(base_model)\n",
+   "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "\n",
    "# Load base model\n",
    "model = AutoModelForCausalLM.from_pretrained(\n",
-   "    base_model,\n",
-   "    device_map=\"auto\", # or \"cuda\"\n",
+   "    model_name,\n",
+   "    device_map=\"auto\", # or \"cuda\" if you have NVIDIA\n",
    "    torch_dtype=\"auto\"\n",
    ")\n",
    "\n",
-   "# Load LoRA adapter into the base model\n",
+   "# Load LoRA adapter\n",
    "model = PeftModel.from_pretrained(model, adapter_folder_name)\n",
    "\n",
-   "# Merge if you want a standalone model (optional)\n",
-   "# model = model.merge_and_unload()\n",
+   "# Merge LoRA into the base model (optional if you want a standalone model)\n",
+   "model = model.merge_and_unload()\n",
    "\n",
-   "# Inference\n",
-   "prompt = \"You are a Sumobot assistant. Given this state: AngleToEnemy=89.89, AngleToEnemyScore=0.00, DistanceToEnemyScore=0.63, NearBorderArenaScore=0.36, FacingToArena=0.02. Suggested Action:\"\n",
-   "inputs = tokenizer(prompt, return_tensors=\"pt\").to(model.device)\n",
+   "# Inference with chat template\n",
+   "messages = [\n",
+   "    {\"role\": \"system\", \"content\": \"You are a Sumobot assistant that decides actions based on game state.\"},\n",
+   "    {\"role\": \"user\", \"content\": \"Given this game state: AngleToEnemy=8.11, AngleToEnemyScore=0.99, DistanceToEnemyScore=0.81, NearBorderArenaScore=0.19, FacingToArena=-0.98.\"},\n",
+   "]\n",
+   "\n",
+   "# Apply the tokenizer's built-in chat template\n",
+   "chat_prompt = tokenizer.apply_chat_template(\n",
+   "    messages,\n",
+   "    tokenize=False, \n",
+   "    add_generation_prompt=True \n",
+   ")\n",
+   "\n",
+   "inputs = tokenizer(chat_prompt, return_tensors=\"pt\").to(model.device)\n",
    "\n",
    "outputs = model.generate(\n",
    "    **inputs,\n",
-   "    max_new_tokens=128,\n",
-   "    temperature=0.1,\n",
-   "    top_p=0.9\n",
+   "    max_new_tokens=128\n",
    ")\n",
    "\n",
    "print(tokenizer.decode(outputs[0], skip_special_tokens=True))"
    ]
-  }
+  },
+  {
+   "cell_type": "markdown",
+   "id": "05789a5e",
+   "metadata": {},
+   "source": [
+    "# Save merged model - OPTIONAL"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1e491e7e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+    "from peft import PeftModel\n",
+    "\n",
+    "# Load tokenizer\n",
+    "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
+    "\n",
+    "# Load base model\n",
+    "model = AutoModelForCausalLM.from_pretrained(\n",
+    "    model_name,\n",
+    "    device_map=\"auto\", # or \"cuda\" if you have NVIDIA\n",
+    "    torch_dtype=\"auto\"\n",
+    ")\n",
+    "\n",
+    "# Load LoRA adapter\n",
+    "model = PeftModel.from_pretrained(model, adapter_folder_name)\n",
+    "\n",
+    "# Merge LoRA into the base model (optional if you want a standalone model)\n",
+    "model = model.merge_and_unload()\n",
+    "\n",
+    "# Save merged adapter (LoRA) with base model - OPTIONAL\n",
+    "save_path = \"qwen2.5-0.5b-instruct-sumobot-merged\"\n",
+    "\n",
+    "model.save_pretrained(save_path, safe_serialization=True)\n",
+    "tokenizer.save_pretrained(save_path)"
+   ]
+  }
  ],
@@ -310,7 +383,7 @@
    "provenance": []
   },
   "kernelspec": {
-   "display_name": "…
+   "display_name": "ml",
    "language": "python",
    "name": "python3"
   },
@@ -324,7 +397,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.10.…
+  "version": "3.10.16"
  },
  "widgets": {
   "application/vnd.jupyter.widget-state+json": {
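To inspect what the tokenize function above actually feeds the trainer, one jsonl_message record can be rendered through the tokenizer's chat template. A small sketch assuming the Qwen/Qwen2.5-0.5B-Instruct tokenizer used by the notebook (the sample turns are illustrative):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a Sumobot assistant that decides actions based on game state."},
    {"role": "user", "content": "Given this game state: AngleToEnemy=8.11, AngleToEnemyScore=0.99."},
    {"role": "assistant", "content": "TurnLeft, Accelerate"},  # illustrative target actions
]

# Training-time view: assistant turn included, no generation prompt,
# matching tokenize() in the fine-tuning cell
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False))

# Inference-time view: drop the assistant turn and append the generation
# prompt, matching the test cell
print(tokenizer.apply_chat_template(messages[:2], tokenize=False, add_generation_prompt=True))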
llm/train_llm.py
DELETED

@@ -1,203 +0,0 @@
-# %% [markdown]
-# # Configurations
-
-# %%
-import sys, os
-sys.path.append(os.path.abspath(".."))
-
-from dataset_helper import export_dataset, get_dataset, get_dataset_dir
-
-# Amount of dataset lines that will be compiled and converted to dataset.jsonl.
-# If -1, use all lines.
-max_dataset=1_000_000
-# max_dataset=-1 # Use all lines
-
-# Training args
-batches_per_device=1 # adjust based on GPU CUDA / MPS power. Using standard laptop RAM is suggested to set 1. Example (1,2,4,8)
-# batches_per_device=8
-num_train_epoch=2 # num train 2-3 is enough
-gradient_accumulation=1
-learning_rate=1e-5
-save_every=10_000
-log_every=10
-
-# LoRA
-rank=32
-alpha=64
-dropout=0.01
-
-adapter_folder_name="adapters/qwen2.5_0.5b_lora"
-dataset_output_path = f"{get_dataset_dir()}/llm_dataset.jsonl"
-
-# %% [markdown]
-# # Load Data
-
-# %%
-# Load data
-df, dir = get_dataset()
-
-if max_dataset>-1:
-    df = df.sample(max_dataset)
-
-export_dataset(df, dataset_output_path, format="jsonl", completion_mode="normal", include_pos_rot=False)
-
-print(f"Saved {len(df)} samples to {dataset_output_path}")
-
-# %% [markdown]
-# # Fine-tuning with LoRA
-
-# %%
-import json
-import torch
-from transformers import (
-    AutoTokenizer,
-    AutoModelForCausalLM,
-    Trainer,
-    TrainingArguments,
-    DataCollatorForLanguageModeling
-)
-from datasets import load_dataset
-from peft import LoraConfig, get_peft_model, TaskType
-
-# Device detection (MPS/CUDA)
-if torch.backends.mps.is_available():
-    device = torch.device("mps")
-    device_map = {"": "mps"}
-elif torch.cuda.is_available():
-    device = torch.device("cuda")
-    device_map = {"": "cuda"}
-else:
-    device = torch.device("cpu")
-    device_map = {"": "cpu"}
-
-print(f"Using device: {device}")
-
-# Load Qwen2.5-0.5B model & tokenizer
-model_name = "Qwen/Qwen2.5-0.5B"
-
-
-tokenizer = AutoTokenizer.from_pretrained(
-    model_name,
-    trust_remote_code=True
-)
-
-# Ensure padding token exists
-if tokenizer.pad_token is None:
-    tokenizer.add_special_tokens({'pad_token': tokenizer.eos_token})
-
-model = AutoModelForCausalLM.from_pretrained(
-    model_name,
-    torch_dtype=torch.float16, # safe default
-    device_map=device_map,
-    trust_remote_code=True
-)
-
-# Add LoRA adapter
-lora_config = LoraConfig(
-    r=rank,
-    lora_alpha=alpha,
-    target_modules=["q_proj","v_proj"],
-    lora_dropout=dropout,
-    bias="none",
-    task_type=TaskType.CAUSAL_LM,
-)
-model = get_peft_model(model, lora_config)
-
-# Load dataset
-dataset = load_dataset("json", data_files={"train": dataset_output_path})
-# dataset = dataset["train"].select(range(10_000))
-dataset = dataset["train"]
-
-def tokenize_function(examples):
-    # Convert dicts into text strings for training
-    merged_texts = []
-    for p, c in zip(examples["prompt"], examples["completion"]):
-        merged_texts.append(json.dumps({"prompt": p, "completion": c}))
-    return tokenizer(merged_texts, truncation=True, padding="max_length", max_length=256)
-
-
-tokenized_datasets = dataset.map(
-    tokenize_function,
-    batched=True,
-    num_proc=4,
-    remove_columns=["prompt", "completion"] # remove original cols
-)
-
-# Data collator
-data_collator = DataCollatorForLanguageModeling(
-    tokenizer=tokenizer,
-    mlm=False
-)
-
-# Training setup
-training_args = TrainingArguments(
-    output_dir=adapter_folder_name,
-    per_device_train_batch_size=batches_per_device,
-    gradient_accumulation_steps=gradient_accumulation,
-    learning_rate=learning_rate,
-    num_train_epochs=num_train_epoch,
-    save_strategy="steps",
-    save_steps=save_every,
-    logging_strategy="steps",
-    logging_steps=log_every,
-    report_to="none",
-    fp16=torch.cuda.is_available(),
-)
-
-# Trainer
-trainer = Trainer(
-    model=model,
-    args=training_args,
-    train_dataset=tokenized_datasets,
-    tokenizer=tokenizer,
-    data_collator=data_collator,
-)
-
-# Train
-trainer.train()
-
-# Save LoRA adapter + tokenizer
-model.save_pretrained(adapter_folder_name)
-tokenizer.save_pretrained(adapter_folder_name)
-
-
-# %% [markdown]
-# # Testing
-
-# %%
-from transformers import AutoModelForCausalLM, AutoTokenizer
-from peft import PeftModel
-
-base_model = "Qwen/Qwen2.5-0.5B"
-
-# Load tokenizer
-tokenizer = AutoTokenizer.from_pretrained(base_model)
-
-# Load base model
-model = AutoModelForCausalLM.from_pretrained(
-    base_model,
-    device_map="auto", # or "cuda"
-    torch_dtype="auto"
-)
-
-# Load LoRA adapter into the base model
-model = PeftModel.from_pretrained(model, adapter_folder_name)
-
-# Merge if you want a standalone model (optional)
-# model = model.merge_and_unload()
-
-# Inference
-prompt = "You are a Sumobot assistant. Given this state: AngleToEnemy=89.89, AngleToEnemyScore=0.00, DistanceToEnemyScore=0.63, NearBorderArenaScore=0.36, FacingToArena=0.02. Suggested Action:"
-inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-
-outputs = model.generate(
-    **inputs,
-    max_new_tokens=128,
-    temperature=0.1,
-    top_p=0.9
-)
-
-print(tokenizer.decode(outputs[0], skip_special_tokens=True))
requirements.txt
CHANGED

@@ -1,16 +1,18 @@
-datasets==…
-huggingface_hub==0.34.…
-joblib==1.5.…
-numpy==…
+datasets==3.6.0
+huggingface_hub==0.34.5
+joblib==1.5.1
+numpy==1.26.4
 onnx==1.17.0
-onnxconverter_common==1.…
-onnxruntime==1.22.…
+onnxconverter_common==1.15.0
+onnxruntime==1.22.0
 pandas==2.3.2
-peft==0.…
+peft==0.15.2
 scikit_learn==1.7.2
-tensorflow==2.…
+tensorflow==2.13.0
+# tensorflow_macos==2.13.0 # uncomment if necessary
 tf2onnx==1.16.1
 tokenizers==0.22.0
-torch==2.…
+torch==2.7.0
 tqdm==4.67.1
 transformers==4.56.1
+trl==0.23.0
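After installing the pins above, a quick throwaway check that the stack imports together (not part of the repo):

import importlib

for mod in ["datasets", "peft", "trl", "transformers", "torch", "onnxruntime"]:
    m = importlib.import_module(mod)
    print(f"{mod}: {getattr(m, '__version__', 'unknown')}")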
slm/train_slm.ipynb
CHANGED

@@ -1,43 +1,11 @@
 {
  "cells": [
-  {
-   "cell_type": "markdown",
-   "id": "988a4872",
-   "metadata": {},
-   "source": [
-    "# Prepare environtment\n",
-    "If you're running offline (e.g. jupyter notebook), skip this step.\n",
-    "\n",
-    "But if online (e.g. Google Colab), need to clone first"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "aeb9f4bc",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!pip install huggingface_hub\n",
-    "\n",
-    "from huggingface_hub import snapshot_download\n",
-    "\n",
-    "repo_id = \"arbyazra123/sumobot_ml\"\n",
-    "\n",
-    "# download the repo into local folder\n",
-    "local_dir = snapshot_download(repo_id=repo_id, repo_type=\"dataset\")\n",
-    "\n",
-    "# add to sys.path so Python can import from it\n",
-    "import sys\n",
-    "sys.path.append(local_dir)"
-   ]
-  },
   {
    "cell_type": "markdown",
    "id": "1a948356",
    "metadata": {},
    "source": [
-    "#…
+    "# Configurations"
    ]
   },
   {
@@ -289,7 +257,7 @@
   "id": "9d1d8d70",
   "metadata": {},
   "source": [
-   "# Training"
+   "# Training and save to onnx"
  ]
 },
 {
slm/train_slm.py
DELETED

@@ -1,319 +0,0 @@
-# %% [markdown]
-# # Configuratino
-
-# %%
-import sys, os
-sys.path.append(os.path.abspath(".."))
-
-import torch
-from dataset_helper import export_dataset, get_dataset, get_slm_dir, get_dataset_dir
-
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
-# Amount of dataset lines that will be compiled and converted to dataset.jsonl.
-# If -1, use all lines.
-max_dataset=100
-# max_dataset=-1
-
-# Model parameters
-block_size = 128
-batch_size = 256
-n_embed = 192
-n_heads = 4
-n_layers = 3
-lr = 1e-4
-max_iters = 5000
-eval_interval = 100
-
-onnx_output_name="slm"
-dataset_output_path = f"{get_dataset_dir()}/slm_dataset.txt"
-tokenizer_output_path = f"{get_slm_dir()}/slm_tokenizer.json"
-
-# %% [markdown]
-# # Load Data
-
-# %%
-# Load data
-df, dir = get_dataset(prefer_local=False)
-
-if max_dataset>-1:
-    df = df.sample(max_dataset)
-
-export_dataset(df, dataset_output_path, format="txt", completion_mode="short")
-
-print(f"Saved {len(df)} samples to {dataset_output_path}")
-
-# %% [markdown]
-# # Tokenization
-
-# %%
-from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers
-
-# Tokenization
-tokenizer = Tokenizer(models.BPE())
-tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True)
-tokenizer.decoder = decoders.ByteLevel()
-specials = ["<PAD>"]
-# Tokenization - Train
-trainer = trainers.BpeTrainer(special_tokens=specials)
-tokenizer.train([dataset_output_path], trainer)
-for tok in specials:
-    tokenizer.add_special_tokens([tok])
-# Tokenization - Save to file
-tokenizer.save(tokenizer_output_path)
-print(f"✅ Tokenizer saved to {tokenizer_output_path}")
-
-
-# Load Tokenizer
-data_file = open(dataset_output_path, "r", encoding="utf-8")
-
-tokenizer = Tokenizer.from_file(tokenizer_output_path)
-vocab_size = tokenizer.get_vocab_size()
-
-# %% [markdown]
-# # Prepare Model
-
-# %%
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from tqdm import tqdm
-from tokenizers import Tokenizer
-
-def line_token_stream(file):
-    for line in file:
-        tokens = tokenizer.encode(line).ids
-        yield tokens
-
-token_stream = line_token_stream(data_file)
-
-def get_batch_linear():
-    global token_stream
-    x_batch, y_batch = [], []
-
-    while len(x_batch) < batch_size:
-        try:
-            tokens = next(token_stream)
-        except StopIteration:
-            # Restart from beginning
-            data_file.seek(0)
-            token_stream = line_token_stream(data_file)
-            tokens = next(token_stream)
-
-        # Pad or trim
-        if len(tokens) < block_size + 1:
-            tokens += [0] * (block_size + 1 - len(tokens))
-        else:
-            tokens = tokens[:block_size + 1]
-
-        x_batch.append(tokens[:-1])
-        y_batch.append(tokens[1:])
-
-    return (
-        torch.tensor(x_batch, dtype=torch.long, device=device),
-        torch.tensor(y_batch, dtype=torch.long, device=device)
-    )
-
-# === Model ===
-class Head(nn.Module):
-    def __init__(self, head_size):
-        super().__init__()
-        self.key = nn.Linear(n_embed, head_size, bias=False)
-        self.query = nn.Linear(n_embed, head_size, bias=False)
-        self.value = nn.Linear(n_embed, head_size, bias=False)
-        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
-
-    def forward(self, x):
-        B, T, C = x.shape
-        k = self.key(x)
-        q = self.query(x)
-        wei = q @ k.transpose(-2, -1) / (C ** 0.5)
-        wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))
-        wei = F.softmax(wei, dim=-1)
-        v = self.value(x)
-        return wei @ v
-
-class MultiHeadAttention(nn.Module):
-    def __init__(self, num_heads, head_size):
-        super().__init__()
-        self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
-        self.proj = nn.Linear(n_embed, n_embed)
-
-    def forward(self, x):
-        out = torch.cat([h(x) for h in self.heads], dim=-1)
-        return self.proj(out)
-
-class FeedForward(nn.Module):
-    def __init__(self, n_embed):
-        super().__init__()
-        self.net = nn.Sequential(
-            nn.Linear(n_embed, 4 * n_embed),
-            nn.ReLU(),
-            nn.Linear(4 * n_embed, n_embed)
-        )
-
-    def forward(self, x):
-        return self.net(x)
-
-class Block(nn.Module):
-    def __init__(self, n_embed, n_heads):
-        super().__init__()
-        head_size = n_embed // n_heads
-        self.sa = MultiHeadAttention(n_heads, head_size)
-        self.ffwd = FeedForward(n_embed)
-        self.ln1 = nn.LayerNorm(n_embed)
-        self.ln2 = nn.LayerNorm(n_embed)
-
-    def forward(self, x):
-        x = x + self.sa(self.ln1(x))
-        x = x + self.ffwd(self.ln2(x))
-        return x
-
-class GPT(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.token_embedding = nn.Embedding(vocab_size, n_embed)
-        self.pos_embedding = nn.Embedding(block_size, n_embed)
-        self.blocks = nn.Sequential(*[Block(n_embed, n_heads) for _ in range(n_layers)])
-        self.ln_f = nn.LayerNorm(n_embed)
-        self.head = nn.Linear(n_embed, vocab_size)
-
-    def forward(self, idx, targets=None):
-        B, T = idx.shape
-        tok_emb = self.token_embedding(idx)
-        pos_emb = self.pos_embedding(torch.arange(T, device=device))
-        x = tok_emb + pos_emb
-        x = self.blocks(x)
-        x = self.ln_f(x)
-        logits = self.head(x)
-
-        if targets is None:
-            return logits, None
-
-        loss = F.cross_entropy(logits.view(-1, vocab_size), targets.view(-1))
-        return logits, loss
-
-    def generate(self, idx, max_new_tokens=50):
-        for _ in range(max_new_tokens):
-            idx_cond = idx[:, -block_size:]
-            logits, _ = self(idx_cond)
-            probs = F.softmax(logits[:, -1, :], dim=-1)
-            next_idx = torch.multinomial(probs, num_samples=1)
-            idx = torch.cat((idx, next_idx), dim=1)
-        return idx
-
-# %% [markdown]
-# # Training
-
-# %%
-# Training
-model = GPT().to(device)
-optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
-
-for step in tqdm(range(max_iters)):
-    xb, yb = get_batch_linear() # sequential batching
-    logits, loss = model(xb, yb)
-    optimizer.zero_grad()
-    loss.backward()
-    optimizer.step()
-
-    if step % eval_interval == 0:
-        val_x, val_y = get_batch_linear() # sequential validation
-        _, val_loss = model(val_x, val_y)
-        print(f"Step {step}: train loss {loss.item():.4f}, val loss {val_loss.item():.4f}")
-
-# Generation
-# Encode example prompt
-context_ids = tokenizer.encode("BotPos=[2.23,2.25], BotRot=228, EnemyPos=[2.87,0.39], EnemyRot=87, AngleToEnemy=-29.68, AngleToEnemyScore=0.87, DistanceToEnemyScore=0.79, NearBorderArenaScore=0.42, FacingToArena=0.65.").ids
-context = torch.tensor(context_ids, dtype=torch.long, device=device).unsqueeze(0)
-
-# Generate
-output_ids = model.generate(context, max_new_tokens=20)[0].tolist()
-
-# Decode generated IDs back to text
-output_text = tokenizer.decode(output_ids)
-print(output_text)
-
-model.eval()
-
-# Dummy input: batch=1, variable sequence length (start small for export)
-dummy_input = torch.randint(0, vocab_size, (1, 8), dtype=torch.long, device=device)
-
-torch.onnx.export(
-    model,
-    dummy_input,
-    f"{onnx_output_name}.onnx",
-    input_names=["input_ids"],
-    output_names=["logits"],
-    dynamic_axes={
-        "input_ids": {0: "batch_size", 1: "sequence_length"},
-        "logits": {0: "batch_size", 1: "sequence_length"}
-    },
-    opset_version=13
-)
-
-print(f"✅ Exported GPT model to {onnx_output_name}.onnx")
-
-# Quantize the model - OPTIONAL
-import onnx
-from onnxconverter_common import float16
-model = onnx.load(f"{onnx_output_name}.onnx")
-fp16_model = float16.convert_float_to_float16(model)
-onnx.save(fp16_model, f"{onnx_output_name}_fp16.onnx")
-
-# %% [markdown]
-# # Testing
-
-# %%
-from time import sleep
-from tokenizers import Tokenizer
-import numpy as np
-import onnxruntime as ort
-
-# Load trained BPE tokenizer
-tokenizer = Tokenizer.from_file(tokenizer_output_path)
-vocab_size = tokenizer.get_vocab_size()
-
-# Load ONNX model
-session = ort.InferenceSession(f"{onnx_output_name}.onnx", providers=['CPUExecutionProvider'])
-
-def generate_onnx(prompt, max_new_tokens=20, block_size=128):
-    # Encode with BPE tokenizer
-    input_ids = tokenizer.encode(prompt).ids
-    for _ in range(max_new_tokens):
-        # Keep only last block_size tokens
-        input_slice = input_ids[-block_size:]
-        input_array = np.array([input_slice], dtype=np.int64)
-
-        # Run inference
-        outputs = session.run(None, {"input_ids": input_array})
-        logits = outputs[0] # shape: (1, seq_len, vocab_size)
-
-        # Get last token logits
-        next_token_logits = logits[0, -1]
-        next_token_id = int(np.argmax(next_token_logits))
-
-        pred = tokenizer.decode([next_token_id])
-
-        # Optional stop condition (EOS token index)
-        if pred == "\n":
-            break
-
-        # sleep(0.1)
-        input_ids.append(next_token_id)
-        print(f"[{pred}]", end="", flush=True)
-
-    # Decode IDs back to string
-    return tokenizer.decode(input_ids)
-
-# Test run
-prompt = (
-    "BotPos=[2.23,2.25], BotRot=228, EnemyPos=[2.87,0.39], EnemyRot=87, AngleToEnemy=-29.68, AngleToEnemyScore=0.87, DistanceToEnemyScore=0.79, NearBorderArenaScore=0.42, FacingToArena=0.65. Suggested Action:"
-)
-
-output = generate_onnx(prompt, max_new_tokens=300)
-print(f"\n\n🧠 Output {len(output)}:", output)