arby-pc-lab committed
Commit e9c2363 · 1 Parent(s): e62a698
add vdb, train_llm
- .gitattributes +3 -2
- .gitignore +2 -0
- __pycache__/dataset_helper.cpython-312.pyc +0 -0
- classification/train_ml.ipynb +30 -8
- dataset_helper.py +97 -1
- llm/api.py +24 -15
- llm/api_gguf.py +213 -0
- llm/api_mlx.py +3 -3
- llm/complete_dataset.jsonl +3 -0
- llm/convert_to_gguf.py +235 -0
- llm/dataset_prompt.py +378 -0
- llm/expand_dataset.py +159 -0
- llm/testng.py +71 -0
- llm/train_llm.ipynb +137 -61
- llm/train_llm.py +321 -0
- requirements.txt +19 -17
- slm/train_slm.ipynb +136 -9
- vdb/cleaned_dataset.jsonl +3 -0
- vdb/dataset_cleaner.py +241 -0
- vdb/import_data.py +1 -1
.gitattributes
CHANGED
@@ -1,5 +1,5 @@
 *.csv filter=lfs diff=lfs merge=lfs -text
-
+*.jsonl filter=lfs diff=lfs merge=lfs -text
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
@@ -58,4 +58,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
-*.webm filter=lfs diff=lfs merge=lfs -text
+*.webm filter=lfs diff=lfs merge=lfs -text
+llm/complete_dataset.jsonl filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -0,0 +1,2 @@
+venv
+temp_data
__pycache__/dataset_helper.cpython-312.pyc
ADDED
Binary file (21 kB).
classification/train_ml.ipynb
CHANGED
@@ -10,23 +10,45 @@
    },
    {
     "cell_type": "code",
-    "execution_count":
+    "execution_count": null,
     "id": "8bb0209c-63c4-4601-894c-0ded8f4db2e6",
     "metadata": {},
-    "outputs": [
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "d:\\sumobot\\sumobot_ml\\venv\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+       " from .autonotebook import tqdm as notebook_tqdm\n"
+      ]
+     }
+    ],
     "source": [
-     "import os\n",
+     "import sys, os\n",
+     "sys.path.append(os.path.abspath(\"..\"))\n",
+     "\n",
      "import numpy as np\n",
      "import pandas as pd\n",
      "import glob\n",
+     "from dataset_helper import get_dataset, get_dataset_dir\n",
+     "\n",
      "\n",
      "# Amount of dataset lines that will be compiled and converted to dataset.jsonl. \n",
      "# If -1, use all lines.\n",
-     "max_dataset=100\n",
-     "
+     "# max_dataset=100\n",
+     "max_dataset=-1\n",
      "\n",
      "output_onnx_name = \"ml.onnx\"\n",
-     "output_labels_name = \"ml_labels.json\"\n"
+     "output_labels_name = \"ml_labels.json\"\n",
+     "\n",
+     "# Load & process data\n",
+     "\n",
+     "df, dir = get_dataset(inside_arena=True)\n",
+     "\n",
+     "# df.to_csv(f\"{get_dataset_dir()}/merged.csv\", index=False)\n",
+     "\n",
+     "if max_dataset>-1:\n",
+     "    df = df.sample(max_dataset)\n"
     ]
    },
    {
@@ -231,7 +253,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "
+   "display_name": "venv",
    "language": "python",
    "name": "python3"
   },
@@ -245,7 +267,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.10
+   "version": "3.12.10"
  }
 },
 "nbformat": 4,
dataset_helper.py
CHANGED
@@ -322,11 +322,17 @@ def export_dataset(df, output_path, format="txt", completion_mode="normal", incl
                 }
                 f.write(json.dumps(record) + "\n")
             elif format == "jsonl_text":
-                line = f"
+                line = f"<INST> state: {prompt_str}</INST> {', '.join(actions)}"
                 record = {
                     "text": line,
                 }
                 f.write(json.dumps(record) + "\n")
+            elif format == "state_action":
+                record = {
+                    "state": prompt_str,
+                    "action": ', '.join(actions),
+                }
+                f.write(json.dumps(record) + "\n")


     except Exception as e:
@@ -373,6 +379,8 @@ def get_dataset(
         print(f"Reading local dataset from: {local_dataset_path}")
         csv_files = glob.glob(os.path.join(local_dataset_path, "*.csv"))
         for fname in csv_files:
+            if not os.path.basename(fname).startswith("game_logs"):
+                continue
             df = pd.read_csv(fname)
             df["source_file"] = os.path.basename(fname)
             dfs.append(df)
@@ -418,3 +426,91 @@ def get_dataset_from_hf(
         df.to_csv(save_path, index=False)
         print(f"Saved: {save_path}")
     return dfs
+
+def sample_dataset(
+    input_file: str,
+    train_output: str,
+    val_output: str,
+    max_lines: int,
+    action_ratio: dict[str, float],
+    train_ratio: float = 0.85,
+):
+
+    df = pd.read_json(input_file, lines=True)
+
+    lines = (df.shape[0] if max_lines == -1 else max_lines)
+    print(lines)
+
+    if max_lines == -1 or action_ratio is None:
+        split_idx = int(len(df) * train_ratio)
+        train_df = df.iloc[:split_idx]
+        val_df = df.iloc[split_idx:]
+
+        # Save back to JSONL
+        train_df.to_json(train_output, orient="records", lines=True, force_ascii=False)
+        val_df.to_json(val_output, orient="records", lines=True, force_ascii=False)
+
+        print(f"Saved {len(train_df)} samples to {train_output}")
+        print(f"Saved {len(val_df)} samples to {val_output}")
+        return
+
+    # Shuffle
+    df = df.sample(frac=1, random_state=42).reset_index(drop=True)
+
+    # --- Detect format ---
+    if "messages" in df.columns:
+        # Extract assistant content from messages
+        df["assistant_content"] = df["messages"].apply(get_assistant_content)
+    elif "text" in df.columns:
+        # Use raw text directly
+        df["assistant_content"] = df["text"]
+    elif "completion" in df.columns:
+        df["assistant_content"] = df["completion"]
+    elif "action" in df.columns:
+        df["assistant_content"] = df["action"]
+    else:
+        raise ValueError("Dataset must contain either 'messages' or 'text' column")
+
+    # Buckets by action
+    # For each action, filter rows where assistant_content contains it
+    buckets = {
+        k: df[df["assistant_content"].str.contains(k, na=False)]
+        for k in action_ratio.keys()
+    }
+
+    # Calculate target samples per action
+    total = sum(action_ratio.values())
+    targets = {k: int(action_ratio[k] / total * lines) for k in action_ratio}
+
+    # Collect balanced samples
+    selected_dfs = []
+    for action, bucket in buckets.items():
+        selected_dfs.append(bucket.sample(
+            n=min(len(bucket), targets[action]),
+            random_state=42
+        ))
+
+    # Merge and shuffle again
+    selected_df = pd.concat(selected_dfs).sample(frac=1, random_state=42).reset_index(drop=True)
+
+    # Trim to max_lines
+    selected_df = selected_df.head(lines)
+    print(len(selected_df))
+
+    # Train/val split
+    split_idx = int(len(selected_df) * train_ratio)
+    train_df = selected_df.iloc[:split_idx].drop(columns=["assistant_content"], errors="ignore")
+    val_df = selected_df.iloc[split_idx:].drop(columns=["assistant_content"], errors="ignore")
+
+    # Save back to JSONL
+    train_df.to_json(train_output, orient="records", lines=True, force_ascii=False)
+    val_df.to_json(val_output, orient="records", lines=True, force_ascii=False)
+
+    print(f"Saved {len(train_df)} samples to {train_output}")
+    print(f"Saved {len(val_df)} samples to {val_output}")
+
+def get_assistant_content(messages):
+    for msg in messages:
+        if msg.get("role") == "assistant":
+            return msg.get("content", "")
+    return ""
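
As a usage sketch of the new sample_dataset helper (the file names and ratio weights below are hypothetical, not part of this commit): the action_ratio keys match the shorthand action codes used elsewhere in the repo, and the weights are normalized internally, so only their relative sizes matter.

from dataset_helper import sample_dataset

# Hypothetical call: balance the five action codes, cap at 50k rows,
# and write an 85/15 train/validation split back to JSONL.
sample_dataset(
    input_file="llm/complete_dataset.jsonl",
    train_output="train.jsonl",
    val_output="val.jsonl",
    max_lines=50000,  # -1 would skip balancing and just split
    action_ratio={"FWD": 0.4, "TL": 0.2, "TR": 0.2, "DS": 0.1, "SK": 0.1},
    train_ratio=0.85,
)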
llm/api.py
CHANGED
@@ -2,31 +2,43 @@ import re
 from typing import Dict, Optional
 from fastapi import FastAPI
 from pydantic import BaseModel
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import BitsAndBytesConfig, AutoModelForCausalLM, AutoTokenizer
 from peft import PeftModel
 import uvicorn
+import torch
 
 base_model_id = "Qwen/Qwen2.5-0.5B-Instruct"
-
-#
+# base_model_id = "qwen2.5-0.5b-instruct-sumobot-merged"
+#lora_adapter_or_id = "qwen2.5-0.5b-instruct-sumobot"
+lora_adapter_or_id = "adapters/qwen2.5_0.5b_lora_half"
 
 # # Load tokenizer
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained(
+    base_model_id,
+    trust_remote_code=True, use_fast=True)
+
+bnb_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_use_double_quant=False,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16,
+)
 
 # Load base model
 base_model = AutoModelForCausalLM.from_pretrained(
     base_model_id,
-    device_map="auto",
-
-
+    device_map="auto",
+    # quantization_config=bnb_config,
+    # torch_dtype=torch.bfloat16,
+    trust_remote_code=True )
 
-model, tokenizer = load(base_model_id)
 
 # Load LoRA weights
 model = PeftModel.from_pretrained(base_model, lora_adapter_or_id)
 
 # Merge LoRA into the base model (optional if you want a standalone model)
 model = model.merge_and_unload()
+# model.eval()
 
 import re
 from typing import Dict, Optional
@@ -60,14 +72,15 @@ def parse_action(output: str):
 
         actions[name] = duration
 
+    print(f"result: {actions}")
     return {"action": actions}
 
 
 def get_finetuned_action(query_state):
     # Inference with chat template
     messages = [
-        {"role": "system", "content": "You are a Sumobot assistant
-        {"role": "user", "content": f"
+        {"role": "system", "content": "You are a Sumobot assistant"},
+        {"role": "user", "content": f"State: {query_state}"},
     ]
 
     # Apply the tokenizer's built-in chat template
@@ -81,13 +94,9 @@ def get_finetuned_action(query_state):
 
     outputs = model.generate(
         **inputs,
-        max_new_tokens=
+        max_new_tokens=15
    )
 
-    result = ""
-    for token in generate(model, tokenizer, chat_prompt, max_tokens=50):
-        result += token
-
     # Slice out only the newly generated tokens
     generated_tokens = outputs[0][inputs["input_ids"].shape[1]:]
 
llm/api_gguf.py
ADDED
@@ -0,0 +1,213 @@
+"""
+Model Pool Sumobot API - Multiple model instances for true parallelism
+Requires more GPU memory but can process multiple requests simultaneously
+"""
+import re
+import asyncio
+from typing import Dict, Optional, List
+from fastapi import FastAPI
+from pydantic import BaseModel
+import uvicorn
+from llama_cpp import Llama
+import multiprocessing as mp
+from concurrent.futures import ProcessPoolExecutor
+
+# ==================== Configuration ====================
+MODEL_PATH = "models/qwen2.5-sumobot-half-q8_0.gguf"
+N_GPU_LAYERS = -1
+NUM_WORKERS = 20  # Number of parallel model instances
+
+ACTION_PATTERN = re.compile(r"^([A-Za-z]+)\s*([\d.]+)$")
+
+# ==================== Worker Process ====================
+class ModelWorker:
+    """Each worker has its own model instance"""
+
+    def __init__(self):
+        self.model = None
+
+    def initialize(self):
+        """Load model in worker process"""
+        if self.model is None:
+            print(f"Loading model in worker {mp.current_process().name}...")
+            self.model = Llama(
+                model_path=MODEL_PATH,
+                n_ctx=512,
+                n_threads=2,  # Fewer threads per worker
+                n_gpu_layers=N_GPU_LAYERS,
+                verbose=False,
+                n_batch=512,
+                use_mmap=True,
+            )
+            print(f"✅ Model loaded in {mp.current_process().name}")
+
+    def parse_action(self, output: str) -> Dict[str, Optional[float]]:
+        """Parse model output to action dict"""
+        action_map = {
+            "SK": "Skill", "DS": "Dash", "FWD": "Accelerate",
+            "TL": "TurnLeft", "TR": "TurnRight",
+        }
+
+        actions = {}
+        for part in output.split(","):
+            part = part.strip()
+            if not part:
+                continue
+
+            name = part
+            duration = None
+
+            match = ACTION_PATTERN.match(part)
+            if match:
+                name = match.group(1)
+                duration = float(match.group(2))
+
+            name_upper = name.upper()
+            for short, full in action_map.items():
+                if name_upper.startswith(short):
+                    actions[full] = duration
+                    break
+
+        return actions
+
+    def process(self, state: str) -> dict:
+        """Process a single request"""
+        import time
+
+        if self.model is None:
+            self.initialize()
+
+        prompt = f"""<|im_start|>system
+Sumobot assistant.<|im_end|>
+<|im_start|>user
+{state}<|im_end|>
+<|im_start|>assistant
+action:"""
+
+        start_time = time.time()
+
+        output = self.model(
+            prompt,
+            max_tokens=50,
+            temperature=0.1,
+            stop=["<|im_end|>", "\n"],
+            top_p=0.95,
+            echo=False
+        )
+
+        inference_time = (time.time() - start_time) * 1000
+        raw_output = output['choices'][0]['text'].strip()
+
+        return {
+            "raw_output": raw_output,
+            "action": self.parse_action(raw_output),
+            "inference_time_ms": round(inference_time, 2),
+            "worker": mp.current_process().name
+        }
+
+# ==================== Global Worker Instance ====================
+_worker = ModelWorker()
+
+def process_request(state: str) -> dict:
+    """Global function for ProcessPoolExecutor"""
+    return _worker.process(state)
+
+# ==================== FastAPI ====================
+app = FastAPI()
+
+class QueryInput(BaseModel):
+    state: str
+
+# Process pool for parallel execution
+executor = None
+
+@app.on_event("startup")
+async def startup_event():
+    """Initialize process pool on startup"""
+    global executor
+
+    print(f"🚀 Starting {NUM_WORKERS} model workers...")
+    executor = ProcessPoolExecutor(
+        max_workers=NUM_WORKERS,
+        mp_context=mp.get_context('spawn')
+    )
+
+    # Warm up workers
+    print("🔥 Warming up workers...")
+    dummy_state = "BotPos=[0,0], BotRot=0, EnemyPos=[1,1], EnemyRot=0"
+    futures = [
+        executor.submit(process_request, dummy_state)
+        for _ in range(NUM_WORKERS)
+    ]
+    for future in futures:
+        future.result()
+
+    print(f"✅ {NUM_WORKERS} workers ready!\n")
+
+@app.on_event("shutdown")
+async def shutdown_event():
+    """Clean up process pool"""
+    global executor
+    if executor:
+        executor.shutdown(wait=True)
+
+@app.post("/query")
+async def query(input: QueryInput):
+    """
+    Process request using worker pool
+    Can handle NUM_WORKERS requests truly in parallel
+    """
+    try:
+        loop = asyncio.get_event_loop()
+        result = await loop.run_in_executor(
+            executor,
+            process_request,
+            input.state
+        )
+        return result
+    except Exception as e:
+        return {
+            "error": str(e),
+            "message": "Failed to process request"
+        }
+
+@app.get("/stats")
+async def stats():
+    """Get worker pool statistics"""
+    return {
+        "num_workers": NUM_WORKERS,
+        "model_path": MODEL_PATH,
+    }
+
+# ==================== Run ====================
+if __name__ == "__main__":
+    # Required for Windows multiprocessing
+    mp.set_start_method('spawn', force=True)
+
+    print(f"🚀 Starting API with {NUM_WORKERS} parallel workers")
+    print("⚡ Can process multiple requests simultaneously\n")
+
+    uvicorn.run(
+        app,
+        host="0.0.0.0",
+        port=8000,
+        workers=1,
+        log_level="warning"
+    )
+
+# ==================== Notes ====================
+"""
+MEMORY REQUIREMENTS:
+- Each worker loads a full model copy
+- Q4 model ~800MB × 4 workers = ~3.2GB VRAM minimum
+- Adjust NUM_WORKERS based on your GPU memory
+
+PERFORMANCE:
+- Queue-based (single model): ~50ms per request, sequential
+- Pool-based (4 models): ~50ms per request, but 4 in parallel
+- For 4 Unity clients @ 100ms interval: Pool is 4x faster
+
+When to use which:
+- Queue (single model): <3 concurrent clients, limited VRAM
+- Pool (multiple models): 3+ concurrent clients, enough VRAM
+"""
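
For reference, a minimal client sketch for the pool-based API above (assuming the host and port configured in the __main__ block; the state string follows the format used throughout this commit):

import requests

state = ("AngleToEnemy=7.77, AngleToEnemyScore=0.99, DistanceToEnemyScore=0.76, "
         "NearBorderArenaScore=0.11, FacingToArena=-0.32")
resp = requests.post("http://localhost:8000/query", json={"state": state})
result = resp.json()
# Expected keys per the handler above: raw_output, action, inference_time_ms, worker.
print(result["action"], result["inference_time_ms"])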
llm/api_mlx.py
CHANGED
@@ -27,9 +27,9 @@ def parse_action(output: str):
         duration = None
 
         direct_match = re.match(r"^([A-Za-z]+)\s*([\d.]+)$", part)
-
-
-
+        if direct_match:
+            name = direct_match.group(1).strip()
+            duration = float(direct_match.group(2))
 
         # Normalize shorthand to full name
         for short, full in action_map.items():
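
The restored branch can be sanity-checked in isolation; with an input token like "FWD 0.35" (an illustrative value, not from the commit), the regex splits the action name from its duration:

import re

part = "FWD 0.35"
direct_match = re.match(r"^([A-Za-z]+)\s*([\d.]+)$", part)
if direct_match:
    name = direct_match.group(1).strip()     # "FWD"
    duration = float(direct_match.group(2))  # 0.35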
llm/complete_dataset.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc5b5d3a9b21f71d3d0737bee4ade72b71920ce430fc4e81a9e2ed483438d406
+size 325872710
llm/convert_to_gguf.py
ADDED
@@ -0,0 +1,235 @@
+import os
+import subprocess
+import sys
+from pathlib import Path
+import shutil
+
+print("="*70)
+print("🪟 Windows GGUF Conversion for Qwen2.5 0.5B + LoRA")
+print("="*70)
+print()
+
+# ==================== Configuration ====================
+base_model_id = "Qwen/Qwen2.5-0.5B-Instruct"
+lora_adapter_path = "adapters/qwen2.5_0.5b_lora_half2"  # Your LoRA adapter folder
+
+# Output paths
+merged_model_path = "./merged_qwen2.5_sumobot_half2"
+gguf_output_dir = "./models"
+gguf_fp16_path = os.path.join(gguf_output_dir, "qwen2.5-sumobot-half-fp16.gguf")
+
+# ==================== Prerequisites Check ====================
+print("📋 Step 1: Checking prerequisites...")
+print("-"*70)
+
+
+# # ==================== Step 1: Merge LoRA with Base Model ====================
+# print("="*70)
+# print("📦 Step 2: Merging LoRA adapter with base model")
+# print("="*70)
+# print()
+
+# from transformers import AutoModelForCausalLM, AutoTokenizer
+# from peft import PeftModel
+# import torch
+
+# print(f"Loading base model: {base_model_id}")
+# tokenizer = AutoTokenizer.from_pretrained(base_model_id)
+# base_model = AutoModelForCausalLM.from_pretrained(
+#     base_model_id,
+#     # torch_dtype=torch.float16,
+#     device_map="auto",  # Keep on CPU for Windows compatibility
+#     trust_remote_code=True
+# )
+
+# print(f"Loading LoRA adapter: {lora_adapter_path}")
+# model = PeftModel.from_pretrained(base_model, lora_adapter_path)
+
+# print("Merging LoRA weights into base model...")
+# merged_model = model.merge_and_unload()
+
+# print(f"Saving merged model to: {merged_model_path}")
+# os.makedirs(merged_model_path, exist_ok=True)
+# merged_model.save_pretrained(merged_model_path, safe_serialization=True)
+# tokenizer.save_pretrained(merged_model_path)
+
+# print("✅ Merged model saved!")
+# print()
+
+# # ==================== Step 3: Convert to GGUF FP16 ====================
+# print("="*70)
+# print("🔄 Step 4: Converting to GGUF FP16 format")
+# print("="*70)
+# print()
+# from pathlib import Path
+
+# ROOT_DIR = Path(r"D:\sumobot")
+# SCRIPT_PATH = ROOT_DIR / "llama.cpp" / "convert_hf_to_gguf.py"
+
+# os.makedirs(gguf_output_dir, exist_ok=True)
+
+# if not SCRIPT_PATH.exists():
+#     print("❌ Conversion script not found!")
+#     print(f"Expected at: {SCRIPT_PATH}")
+#     sys.exit(1)
+
+# print(f"Converting to: {gguf_fp16_path}")
+# print("This may take a few minutes...")
+
+# try:
+#     subprocess.run([
+#         sys.executable,  # Use current Python interpreter
+#         str(SCRIPT_PATH),
+#         merged_model_path,
+#         "--outfile", gguf_fp16_path,
+#         "--outtype", "f16"
+#     ], check=True)
+
+#     file_size = os.path.getsize(gguf_fp16_path) / (1024**2)
+#     print(f"✅ GGUF FP16 created: {file_size:.1f} MB")
+# except subprocess.CalledProcessError:
+#     print("❌ Conversion failed!")
+#     sys.exit(1)
+
+# print()
+
+# ==================== Step 4: Quantize to Different Formats ====================
+print("="*70)
+print("⚡ Step 5: Quantizing to different formats")
+print("="*70)
+print()
+
+from pathlib import Path
+
+ROOT_DIR = Path(r"D:\sumobot")
+SCRIPT_PATH = ROOT_DIR / "llama.cpp"
+
+# Find quantize executable on Windows
+quantize_exe = SCRIPT_PATH / "build" / "bin" / "Release" / "llama-quantize.exe"
+
+# if not quantize_exe.exists():
+#     # Try alternative paths
+#     alt_paths = [
+#         llama_cpp_path / "build" / "Release" / "llama-quantize.exe",
+#         llama_cpp_path / "build" / "bin" / "llama-quantize.exe",
+#     ]
+
+#     for alt_path in alt_paths:
+#         if alt_path.exists():
+#             quantize_exe = alt_path
+#             break
+#     else:
+#         print("❌ llama-quantize.exe not found!")
+#         print("Build may have failed. Check build output above.")
+#         sys.exit(1)
+
+# print(f"Using quantize tool: {quantize_exe}")
+# print()
+
+# Quantization types
+quant_configs = {
+    "Q8_0": "Best quality (~50% compression)",
+    "Q5_K_M": "Balanced (recommended)",
+    "Q4_K_M": "Fast inference",
+    "Q4_0": "Fastest (good quality)",
+}
+
+quantized_models = {}
+
+for quant_type, description in quant_configs.items():
+    output_path = os.path.join(gguf_output_dir, f"qwen2.5-sumobot-half-{quant_type.lower()}.gguf")
+
+    print(f"📦 Quantizing to {quant_type}...")
+    print(f"   {description}")
+
+    try:
+        subprocess.run([
+            str(quantize_exe),
+            gguf_fp16_path,
+            output_path,
+            quant_type
+        ], check=True)
+
+        size_mb = os.path.getsize(output_path) / (1024**2)
+        quantized_models[quant_type] = {
+            "path": output_path,
+            "size_mb": size_mb
+        }
+        print(f"   ✅ Created: {size_mb:.1f} MB")
+    except subprocess.CalledProcessError:
+        print(f"   ❌ Failed to create {quant_type}")
+
+print()
+
+# # ==================== Step 5: Test the Model ====================
+# print("="*70)
+# print("🧪 Step 6: Testing quantized model")
+# print("="*70)
+# print()
+
+# print("Installing llama-cpp-python for Windows...")
+# try:
+#     # Install with pip
+#     subprocess.run([
+#         sys.executable, "-m", "pip", "install", "llama-cpp-python"
+#     ], check=True)
+#     print("✅ llama-cpp-python installed")
+# except subprocess.CalledProcessError:
+#     print("⚠️ Could not install llama-cpp-python automatically")
+#     print("Install manually with: pip install llama-cpp-python")
+
+# print()
+
+# # Test with one of the quantized models
+# if quantized_models:
+#     print("Testing model inference...")
+
+#     try:
+#         from llama_cpp import Llama
+
+#         # Use Q5_K_M model for testing (good balance)
+#         test_model_path = quantized_models.get("Q5_K_M", list(quantized_models.values())[0])["path"]
+
+#         print(f"Loading: {test_model_path}")
+#         llm = Llama(
+#             model_path=test_model_path,
+#             n_ctx=2048,
+#             n_threads=4,
+#             verbose=False
+#         )
+
+#         # Test prompt
+#         test_state = "AngleToEnemy=7.77, AngleToEnemyScore=0.99, DistanceToEnemyScore=0.76, NearBorderArenaScore=0.11, FacingToArena=-0.32"
+#         prompt = f"sumobot state: {test_state}\naction:"
+
+#         print(f"\nTest input: {test_state}")
+#         print("Generating action...\n")
+
+#         import time
+#         start = time.time()
+
+#         output = llm(
+#             prompt,
+#             max_tokens=50,
+#             temperature=0.1,
+#             stop=["<|endoftext|>", "\n\n"]
+#         )
+
+#         elapsed = (time.time() - start) * 1000
+
+#         generated = output['choices'][0]['text'].strip()
+#         print(f"Generated action: {generated}")
+#         print(f"⏱️ Inference time: {elapsed:.1f}ms")
+
+#         if elapsed < 100:
+#             print("✅ Meets <100ms requirement!")
+#         else:
+#             print("⚠️ Slower than 100ms target")
+
+#     except ImportError:
+#         print("⚠️ llama-cpp-python not available for testing")
+#         print("Install it later to test: pip install llama-cpp-python")
+#     except Exception as e:
+#         print(f"⚠️ Test failed: {e}")
+
+# print()
llm/dataset_prompt.py
ADDED
@@ -0,0 +1,378 @@
+#!/usr/bin/env python3
+"""
+Parallel Batched LLM Inference for GGUF models
+Splits dataset into batches and processes them in parallel with multiple workers
+"""
+
+import json
+import os
+import multiprocessing as mp
+from tqdm import tqdm
+from pathlib import Path
+
+def count_lines(filename):
+    """Count total lines in a file"""
+    count = 0
+    try:
+        with open(filename, 'r') as f:
+            for _ in f:
+                count += 1
+    except:
+        pass
+    return count
+
+def split_files_into_batches(input_files, num_batches, temp_dir="./batches"):
+    """
+    Split multiple input files into batch files
+
+    Args:
+        input_files: List of input files
+        num_batches: Number of batches to create
+        temp_dir: Directory to store batch files
+
+    Returns:
+        List of batch file paths
+    """
+    print("=" * 70)
+    print("STEP 1: Splitting files into batches")
+    print("=" * 70)
+
+    # Create temp directory
+    os.makedirs(temp_dir, exist_ok=True)
+
+    # Count total lines
+    print("\nCounting total lines...")
+    total_lines = 0
+    for input_file in input_files:
+        lines = count_lines(input_file)
+        total_lines += lines
+        print(f"  {input_file}: {lines:,} lines")
+
+    print(f"\nTotal lines: {total_lines:,}")
+
+    if total_lines == 0:
+        print("✗ No data found in input files!")
+        return []
+
+    # Calculate lines per batch
+    lines_per_batch = total_lines // num_batches
+    remainder = total_lines % num_batches
+
+    print(f"Lines per batch: ~{lines_per_batch:,}")
+    print(f"Creating {num_batches} batch files...\n")
+
+    # Create batch files
+    batch_files = []
+    current_batch = 0
+    current_batch_lines = 0
+    current_batch_file = None
+
+    for input_file in input_files:
+        print(f"Processing {input_file}...")
+
+        try:
+            with open(input_file, 'r') as infile:
+                for line in tqdm(infile, desc="  Splitting"):
+                    line = line.strip()
+                    if not line:
+                        continue
+
+                    # Open new batch file if needed
+                    if current_batch_file is None or current_batch_lines >= lines_per_batch:
+                        # Close previous batch file
+                        if current_batch_file is not None:
+                            current_batch_file.close()
+
+                        # Open new batch file
+                        if current_batch < num_batches:
+                            batch_path = os.path.join(temp_dir, f"batch_{current_batch:04d}.jsonl")
+                            batch_files.append(batch_path)
+                            current_batch_file = open(batch_path, 'w')
+                            current_batch += 1
+                            current_batch_lines = 0
+
+                    # Write line to current batch
+                    if current_batch_file is not None:
+                        current_batch_file.write(line + '\n')
+                        current_batch_lines += 1
+
+        except FileNotFoundError:
+            print(f"  ✗ File not found: {input_file}")
+            continue
+        except Exception as e:
+            print(f"  ✗ Error: {e}")
+            continue
+
+    # Close last batch file
+    if current_batch_file is not None:
+        current_batch_file.close()
+
+    print(f"\n✓ Created {len(batch_files)} batch files in {temp_dir}/\n")
+
+    return batch_files
+
+def worker_process(worker_id, batch_file, output_file, model_path, config):
+    """
+    Worker process that runs inference on one batch
+
+    Args:
+        worker_id: ID of this worker
+        batch_file: Input batch file path
+        output_file: Output file path for this batch
+        model_path: Path to GGUF model
+        config: Configuration dict
+    """
+    try:
+        from llama_cpp import Llama
+
+        # Load model
+        print(f"[Worker {worker_id}] Loading model...")
+        llm = Llama(
+            model_path=model_path,
+            n_ctx=config.get('n_ctx', 512),
+            n_threads=config.get('n_threads', 4),
+            n_gpu_layers=config.get('n_gpu_layers', 0),
+            verbose=False
+        )
+
+        print(f"[Worker {worker_id}] Model loaded. Processing {batch_file}...")
+
+        # Process batch
+        processed = 0
+        skipped = 0
+
+        with open(batch_file, 'r') as infile, open(output_file, 'w') as outfile:
+            for line in infile:
+                line = line.strip()
+                if not line:
+                    continue
+
+                try:
+                    data = json.loads(line)
+                    state = data.get('state', '')
+                    existing_action = data.get('action', '')
+
+
+                    prompt = f"""<|im_start|>system
+Sumobot assistant.<|im_end|>
+<|im_start|>user
+{state}<|im_end|>
+<|im_start|>assistant
+action:"""
+
+                    output = llm(
+                        prompt,
+                        max_tokens=config.get('max_tokens', 32),
+                        temperature=config.get('temperature', 0.1),
+                        stop=["<|im_end|>", "\n"],
+                        echo=False,
+                        top_p=config.get('top_p', 0.95),
+                    )
+
+                    action = output['choices'][0]['text'].strip()
+
+                    result = {
+                        "state": state,
+                        "action": action
+                    }
+                    processed += 1
+
+                    # Write result
+                    outfile.write(json.dumps(result) + '\n')
+
+                    # Progress update
+                    if (processed + skipped) % 100 == 0:
+                        outfile.flush()
+
+                except Exception as e:
+                    print(f"[Worker {worker_id}] Error processing line: {e}")
+                    continue
+
+        print(f"[Worker {worker_id}] ✓ Completed! Processed: {processed:,}, Skipped: {skipped:,}")
+        return processed, skipped
+
+    except Exception as e:
+        print(f"[Worker {worker_id}] ✗ Fatal error: {e}")
+        return 0, 0
+
+def merge_batch_outputs(batch_output_files, final_output_file):
+    """
+    Merge all batch output files into final output
+
+    Args:
+        batch_output_files: List of batch output files
+        final_output_file: Final merged output file
+    """
+    print("\n" + "=" * 70)
+    print("STEP 3: Merging batch results")
+    print("=" * 70)
+
+    total_lines = 0
+
+    with open(final_output_file, 'w') as outfile:
+        for batch_file in batch_output_files:
+            if not os.path.exists(batch_file):
+                print(f"  ✗ Batch output not found: {batch_file}")
+                continue
+
+            lines = 0
+            with open(batch_file, 'r') as infile:
+                for line in infile:
+                    outfile.write(line)
+                    lines += 1
+
+            total_lines += lines
+            print(f"  ✓ Merged {batch_file}: {lines:,} lines")
+
+    print(f"\n✓ Final output: {final_output_file}")
+    print(f"  Total lines: {total_lines:,}\n")
+
+def run_parallel_inference(input_files, output_file, model_path, num_batches=10, config=None):
+    """
+    Main function to run parallel batched inference
+
+    Args:
+        input_files: List of input JSONL files
+        output_file: Final output file
+        model_path: Path to GGUF model
+        num_batches: Number of parallel batches
+        config: Model configuration dict
+    """
+    if config is None:
+        config = {
+            'n_ctx': 512,
+            'n_threads': 4,
+            'n_gpu_layers': 0,
+            'max_tokens': 32,
+            'temperature': 0.1
+        }
+
+    print("\n" + "=" * 70)
+    print("PARALLEL BATCHED INFERENCE")
+    print("=" * 70)
+    print(f"Model: {model_path}")
+    print(f"Number of batches: {num_batches}")
+    print(f"Input files: {len(input_files)}")
+    print(f"Output: {output_file}")
+    print("=" * 70 + "\n")
+
+    # Create directories
+    temp_dir = "./batches"
+    output_dir = "./batch_outputs"
+    os.makedirs(output_dir, exist_ok=True)
+
+    # Step 1: Split into batches
+    batch_files = split_files_into_batches(input_files, num_batches, temp_dir)
+
+    if not batch_files:
+        print("✗ No batches created!")
+        return
+
+    # Step 2: Run parallel inference
+    print("=" * 70)
+    print("STEP 2: Running parallel inference")
+    print("=" * 70)
+    print(f"Starting {len(batch_files)} worker processes...\n")
+
+    # Create output file paths
+    batch_output_files = [
+        os.path.join(output_dir, f"output_{i:04d}.jsonl")
+        for i in range(len(batch_files))
+    ]
+
+    # Create worker arguments
+    worker_args = [
+        (i, batch_files[i], batch_output_files[i], model_path, config)
+        for i in range(len(batch_files))
+    ]
+
+    # Run workers in parallel
+    with mp.Pool(processes=len(batch_files)) as pool:
+        results = pool.starmap(worker_process, worker_args)
+
+    # Summary
+    total_processed = sum(r[0] for r in results)
+    total_skipped = sum(r[1] for r in results)
+
+    print("\n" + "=" * 70)
+    print("Parallel inference completed!")
+    print(f"  New inferences: {total_processed:,}")
+    print(f"  Kept existing: {total_skipped:,}")
+    print(f"  Total: {total_processed + total_skipped:,}")
+    print("=" * 70)
+
+    # Step 3: Merge outputs
+    merge_batch_outputs(batch_output_files, output_file)
+
+    # Cleanup option
+    print("\nCleanup temporary files?")
+    response = input("Delete batch files and outputs? (y/n): ")
+    if response.lower() == 'y':
+        import shutil
+        try:
+            shutil.rmtree(temp_dir)
+            shutil.rmtree(output_dir)
+            print("✓ Temporary files deleted")
+        except Exception as e:
+            print(f"✗ Error deleting temp files: {e}")
+
+    print("\n" + "=" * 70)
+    print("ALL DONE!")
+    print("=" * 70)
+    print(f"Final output: {output_file}")
+    print("=" * 70 + "\n")
+
+
+if __name__ == "__main__":
+
+    # ========================================
+    # CONFIGURATION
+    # ========================================
+
+    # Path to your GGUF model
+    model_path = "models/qwen2.5-sumobot-half-fp16.gguf"
+
+    # Input files to process
+    input_files = [
+        "../dataset/temp.jsonl",      # Original dataset
+        "new_states_to_infer.jsonl",  # New states
+    ]
+
+    # Output file
+    output_file = "complete_dataset.jsonl"
+
+    # Number of parallel batches
+    num_batches = 10
+
+    # Model configuration
+    config = {
+        'n_ctx': 512,        # Context size
+        'n_threads': 4,      # Threads per worker
+        'n_gpu_layers': -1,  # GPU layers (0 = CPU only, 35+ = full GPU)
+        'max_tokens': 32,    # Max tokens to generate
+        'top_p': 0.95
+    }
+
+    # ========================================
+    # RUN
+    # ========================================
+
+    print("\nConfiguration:")
+    print(f"  Model: {model_path}")
+    print(f"  Batches: {num_batches}")
+    print(f"  Threads per worker: {config['n_threads']}")
+    print(f"  GPU layers: {config['n_gpu_layers']}")
+    print(f"  Input files: {input_files}")
+    print(f"  Output: {output_file}")
+
+    response = input("\nProceed? (y/n): ")
+    if response.lower() == 'y':
+        run_parallel_inference(
+            input_files=input_files,
+            output_file=output_file,
+            model_path=model_path,
+            num_batches=num_batches,
+            config=config
+        )
+    else:
+        print("Cancelled.")
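
The hard-coded ChatML prompt in worker_process mirrors Qwen2.5's chat format. A sketch of the equivalent construction via the tokenizer's own template (an alternative, not what this script does; note it stops at the assistant header, so the trailing "action:" cue still has to be appended manually):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
state = "AngleToEnemy=7.77, AngleToEnemyScore=0.99, DistanceToEnemyScore=0.76, NearBorderArenaScore=0.11, FacingToArena=-0.32"
messages = [
    {"role": "system", "content": "Sumobot assistant."},
    {"role": "user", "content": state},
]
# add_generation_prompt=True appends the "<|im_start|>assistant\n" header.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + "action:"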
llm/expand_dataset.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import numpy as np
|
| 3 |
+
from itertools import product
|
| 4 |
+
from tqdm import tqdm
|
| 5 |
+
|
| 6 |
+
def load_existing_states(filename):
|
| 7 |
+
"""Load existing states from the dataset file"""
|
| 8 |
+
existing_states = set()
|
| 9 |
+
|
| 10 |
+
print(f"Loading existing states from {filename}...")
|
| 11 |
+
try:
|
| 12 |
+
with open(filename, 'r') as f:
|
| 13 |
+
for line in tqdm(f, desc="Reading existing data"):
|
| 14 |
+
line = line.strip()
|
| 15 |
+
if line:
|
| 16 |
+
data = json.loads(line)
|
| 17 |
+
state = data['state']
|
| 18 |
+
existing_states.add(state)
|
| 19 |
+
except FileNotFoundError:
|
| 20 |
+
print(f"File {filename} not found. Will create new file.")
|
| 21 |
+
|
| 22 |
+
print(f"Loaded {len(existing_states)} existing states")
|
| 23 |
+
return existing_states
|
| 24 |
+
|
| 25 |
+
def parse_state_to_key(state_str):
|
| 26 |
+
"""Convert state string to a normalized key for comparison"""
|
| 27 |
+
# Extract values from state string
|
| 28 |
+
parts = state_str.split(', ')
|
| 29 |
+
values = {}
|
| 30 |
+
for part in parts:
|
| 31 |
+
key, value = part.split('=')
|
| 32 |
+
values[key] = float(value.rstrip('.'))
|
| 33 |
+
|
| 34 |
+
# Create normalized key with rounded values to handle floating point precision
|
| 35 |
+
key = (
|
| 36 |
+
round(values['AngleToEnemy'], 2),
|
| 37 |
+
round(values['AngleToEnemyScore'], 2),
|
| 38 |
+
round(values['DistanceToEnemyScore'], 2),
|
| 39 |
+
round(values['NearBorderArenaScore'], 2),
|
| 40 |
+
round(values['FacingToArena'], 2)
|
| 41 |
+
)
|
| 42 |
+
return key
|
| 43 |
+
|
| 44 |
+
def create_state_string(angle, angle_score, distance_score, border_score, facing):
|
| 45 |
+
"""Create state string from parameters"""
|
| 46 |
+
return f"AngleToEnemy={angle:.2f}, AngleToEnemyScore={angle_score:.2f}, DistanceToEnemyScore={distance_score:.2f}, NearBorderArenaScore={border_score:.2f}, FacingToArena={facing:.2f}."
|
| 47 |
+
|
| 48 |
+
def generate_new_states(existing_states):
|
| 49 |
+
"""Generate all possible state combinations that don't exist in the dataset"""
|
| 50 |
+
|
| 51 |
+
# Define parameter ranges based on requirements
|
| 52 |
+
angle_to_enemy = np.arange(-180, 181, 1) # -180 to 180, step 1
|
| 53 |
+
angle_to_enemy_score = np.arange(0, 1.01, 0.2) # 0 to 1, step 0.2
|
| 54 |
+
distance_to_enemy_score = np.arange(0, 1.01, 0.2) # 0 to 1, step 0.2
|
| 55 |
+
near_border_arena_score = np.arange(0, 1.01, 0.2) # 0 to 1, step 0.2
|
| 56 |
+
facing_to_arena = np.arange(-1, 1.01, 0.2) # -1 to 1, step 0.2
|
| 57 |
+
|
| 58 |
+
# Round to avoid floating point precision issues
|
| 59 |
+
angle_to_enemy_score = np.round(angle_to_enemy_score, 2)
|
| 60 |
+
distance_to_enemy_score = np.round(distance_to_enemy_score, 2)
|
| 61 |
+
near_border_arena_score = np.round(near_border_arena_score, 2)
|
| 62 |
+
facing_to_arena = np.round(facing_to_arena, 2)
|
| 63 |
+
|
| 64 |
+
print("\nParameter ranges:")
|
| 65 |
+
print(f"AngleToEnemy: {len(angle_to_enemy)} values (from -180 to 180)")
|
| 66 |
+
print(f"AngleToEnemyScore: {len(angle_to_enemy_score)} values ({angle_to_enemy_score})")
|
| 67 |
+
print(f"DistanceToEnemyScore: {len(distance_to_enemy_score)} values ({distance_to_enemy_score})")
|
| 68 |
+
print(f"NearBorderArenaScore: {len(near_border_arena_score)} values ({near_border_arena_score})")
|
| 69 |
+
print(f"FacingToArena: {len(facing_to_arena)} values ({facing_to_arena})")
|
| 70 |
+
|
| 71 |
+
total_combinations = (len(angle_to_enemy) * len(angle_to_enemy_score) *
|
| 72 |
+
len(distance_to_enemy_score) * len(near_border_arena_score) *
|
| 73 |
+
len(facing_to_arena))
|
| 74 |
+
print(f"\nTotal possible combinations: {total_combinations:,}")
|
| 75 |
+
|
| 76 |
+
# Parse existing states into keys
|
| 77 |
+
print("\nParsing existing states...")
|
| 78 |
+
existing_keys = set()
|
| 79 |
+
for state_str in tqdm(existing_states, desc="Parsing states"):
|
| 80 |
+
try:
|
| 81 |
+
key = parse_state_to_key(state_str)
|
| 82 |
+
existing_keys.add(key)
|
| 83 |
+
except:
|
| 84 |
+
continue
|
| 85 |
+
|
| 86 |
+
print(f"Parsed {len(existing_keys)} existing state keys")
|
| 87 |
+
|
| 88 |
+
# Generate new states
|
| 89 |
+
new_states = []
|
| 90 |
+
print("\nGenerating new state combinations...")
|
| 91 |
+
|
| 92 |
+
for angle, angle_score, dist_score, border_score, facing in tqdm(
|
| 93 |
+
product(angle_to_enemy, angle_to_enemy_score, distance_to_enemy_score,
|
| 94 |
+
near_border_arena_score, facing_to_arena),
|
| 95 |
+
total=total_combinations,
|
| 96 |
+
desc="Checking combinations"
|
| 97 |
+
):
|
| 98 |
+
# Create key for this combination
|
| 99 |
+
key = (
|
| 100 |
+
round(float(angle), 2),
|
| 101 |
+
round(float(angle_score), 2),
|
| 102 |
+
round(float(dist_score), 2),
|
| 103 |
+
round(float(border_score), 2),
|
| 104 |
+
round(float(facing), 2)
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
# Check if this combination already exists
|
| 108 |
+
if key not in existing_keys:
|
| 109 |
+
state_str = create_state_string(angle, angle_score, dist_score, border_score, facing)
|
| 110 |
+
new_states.append(state_str)
|
| 111 |
+
|
| 112 |
+
print(f"\nFound {len(new_states)} new state combinations to generate")
|
| 113 |
+
return new_states
|
| 114 |
+
|
| 115 |
+
def save_new_states(new_states, output_filename):
|
| 116 |
+
"""Save new states to file with placeholder actions"""
|
| 117 |
+
print(f"\nSaving new states to {output_filename}...")
|
| 118 |
+
|
| 119 |
+
with open(output_filename, 'w') as f:
|
| 120 |
+
for state in tqdm(new_states, desc="Writing new states"):
|
| 121 |
+
# Placeholder action - will be filled by LLM inference
|
| 122 |
+
entry = {
|
| 123 |
+
"state": state,
|
| 124 |
+
"action": "PENDING"
|
| 125 |
+
}
|
| 126 |
+
f.write(json.dumps(entry) + '\n')
|
| 127 |
+
|
| 128 |
+
print(f"Saved {len(new_states)} new states to {output_filename}")
|
| 129 |
+
|
| 130 |
+
def main():
|
| 131 |
+
input_filename = "../dataset/temp.jsonl"
|
| 132 |
+
output_filename = "new_states_to_infer.jsonl"
|
| 133 |
+
|
| 134 |
+
print("=" * 60)
|
| 135 |
+
print("Sumobot Dataset Expansion Script")
|
| 136 |
+
print("=" * 60)
|
| 137 |
+
|
| 138 |
+
# Load existing states
|
| 139 |
+
existing_states = load_existing_states(input_filename)
|
| 140 |
+
|
| 141 |
+
# Generate new states
|
| 142 |
+
new_states = generate_new_states(existing_states)
|
| 143 |
+
|
| 144 |
+
# Save new states
|
| 145 |
+
if new_states:
|
| 146 |
+
save_new_states(new_states, output_filename)
|
| 147 |
+
print("\n" + "=" * 60)
|
| 148 |
+
print(f"SUCCESS! Generated {len(new_states):,} new states")
|
| 149 |
+
print(f"Output file: {output_filename}")
|
| 150 |
+
print("=" * 60)
|
| 151 |
+
print("\nNext steps:")
|
| 152 |
+
print("1. Run inference on these new states with your fine-tuned LLM")
|
| 153 |
+
print("2. Merge the results back with temp.json")
|
| 154 |
+
print("3. Use the complete dataset for vector database")
|
| 155 |
+
else:
|
| 156 |
+
print("\nNo new states to generate - dataset already complete!")
|
| 157 |
+
|
| 158 |
+
if __name__ == "__main__":
|
| 159 |
+
main()
|
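A minimal sketch of step 2 from the "Next steps" printed above: merging the LLM-inferred actions back into the original dataset. The merge_inferred helper, and the assumption that the inference step rewrites each "PENDING" action in place in new_states_to_infer.jsonl, are illustrative and not part of this commit.

import json

def merge_inferred(original_path, inferred_path, merged_path):
    """Concatenate the already-labeled states with the newly inferred ones."""
    with open(merged_path, "w") as out:
        with open(original_path) as f:        # states that already carry actions
            for line in f:
                out.write(line)
        with open(inferred_path) as f:        # states labeled by the fine-tuned LLM
            for entry in map(json.loads, f):
                if entry.get("action") != "PENDING":   # drop rows still pending
                    out.write(json.dumps(entry) + "\n")

merge_inferred("../dataset/temp.jsonl", "new_states_to_infer.jsonl",
               "complete_dataset.jsonl")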
llm/testng.py
ADDED
|
@@ -0,0 +1,71 @@
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
# coding: utf-8
|
| 3 |
+
"""
|
| 4 |
+
Simple inference - outputs exactly what the model generates
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 9 |
+
from peft import PeftModel
|
| 10 |
+
|
| 11 |
+
model_name = "Qwen/Qwen2.5-0.5B-Instruct"
|
| 12 |
+
adapter_folder_name = "adapters/qwen2.5_0.5b_lora_test"
|
| 13 |
+
|
| 14 |
+
print("Loading model...")
|
| 15 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
|
| 16 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 17 |
+
model_name,
|
| 18 |
+
device_map="auto",
|
| 19 |
+
# torch_dtype=torch.bfloat16,
|
| 20 |
+
trust_remote_code=True
|
| 21 |
+
)
|
| 22 |
+
model = PeftModel.from_pretrained(model, adapter_folder_name)
|
| 23 |
+
model = model.merge_and_unload()
|
| 24 |
+
model.eval()
|
| 25 |
+
print("✅ Model loaded!\n")
|
| 26 |
+
|
| 27 |
+
def get_action(state_string):
|
| 28 |
+
"""Generate action - output exactly what model produces"""
|
| 29 |
+
# Format with ChatML template
|
| 30 |
+
prompt = f"""<|im_start|>system
|
| 31 |
+
You are a Sumobot assistant.<|im_end|>
|
| 32 |
+
<|im_start|>user
|
| 33 |
+
sumobot state: {state_string}<|im_end|>
|
| 34 |
+
<|im_start|>assistant
|
| 35 |
+
action:"""
|
| 36 |
+
|
| 37 |
+
print(f"Test state: {state_string}\n")
|
| 38 |
+
print("Generating action...")
|
| 39 |
+
|
| 40 |
+
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
|
| 41 |
+
|
| 42 |
+
with torch.no_grad():
|
| 43 |
+
outputs = model.generate(
|
| 44 |
+
input_ids=inputs["input_ids"],
|
| 45 |
+
attention_mask=inputs["attention_mask"],
|
| 46 |
+
max_new_tokens=50,
|
| 47 |
+
pad_token_id=tokenizer.eos_token_id,
|
| 48 |
+
temperature=0.1,
|
| 49 |
+
do_sample=True
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
# with torch.no_grad():
|
| 53 |
+
# outputs = model.generate(
|
| 54 |
+
# **inputs,
|
| 55 |
+
# max_new_tokens=50,
|
| 56 |
+
# temperature=0.1,
|
| 57 |
+
# do_sample=True,
|
| 58 |
+
# pad_token_id=tokenizer.eos_token_id,
|
| 59 |
+
# )
|
| 60 |
+
|
| 61 |
+
# Decode and return AS-IS
|
| 62 |
+
result = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 63 |
+
return result
|
| 64 |
+
|
| 65 |
+
# Test
|
| 66 |
+
state = "AngleToEnemy=-44.41, AngleToEnemyScore=0.71, DistanceToEnemyScore=0.59, NearBorderArenaScore=0.40, FacingToArena=-0.71."
|
| 67 |
+
|
| 68 |
+
print("Input:")
|
| 69 |
+
print(state)
|
| 70 |
+
print("\nRaw Model Output:")
|
| 71 |
+
print(get_action(state))
|
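Note that get_action() decodes the full sequence, prompt included, by design ("outputs exactly what the model generates"). When only the generated action string is wanted, one option (a sketch, not part of this script) is to decode just the tokens produced after the prompt:

def extract_action(outputs, inputs, tokenizer):
    """Decode only the newly generated tokens, i.e. everything after the prompt."""
    gen_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    text = tokenizer.decode(gen_tokens, skip_special_tokens=True).strip()
    return text.splitlines()[0] if text else ""   # e.g. "TL0.44"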
llm/train_llm.ipynb
CHANGED
|
@@ -29,32 +29,45 @@
|
|
| 29 |
},
|
| 30 |
{
|
| 31 |
"cell_type": "code",
|
| 32 |
-
"execution_count":
|
| 33 |
"id": "360596e1",
|
| 34 |
"metadata": {},
|
| 35 |
-
"outputs": [
|
| 36 |
"source": [
|
| 37 |
-
"import sys, os\n",
|
| 38 |
"sys.path.append(os.path.abspath(\"..\"))\n",
|
| 39 |
"\n",
|
| 40 |
-
"from dataset_helper import export_dataset, get_dataset, get_dataset_dir\n",
|
|
| 41 |
"\n",
|
| 42 |
"# Amount of dataset lines that will be compiled and converted to dataset.jsonl.\n",
|
| 43 |
"# If -1, use all lines.\n",
|
| 44 |
-
"max_dataset=
|
| 45 |
-
"
|
| 46 |
"train_validation_ratio=0.9\n",
|
| 47 |
"\n",
|
| 48 |
"# Training args\n",
|
| 49 |
-
"
|
| 50 |
-
"#
|
| 51 |
-
"
|
| 52 |
-
"
|
| 53 |
-
"
|
|
| 54 |
"learning_rate=5e-5\n",
|
| 55 |
-
"
|
| 56 |
-
"
|
| 57 |
-
"eval_ratio=
|
| 58 |
"\n",
|
| 59 |
"# LoRA\n",
|
| 60 |
"rank=32\n",
|
|
@@ -63,7 +76,7 @@
|
|
| 63 |
"\n",
|
| 64 |
"model_name = \"Qwen/Qwen2.5-0.5B-Instruct\"\n",
|
| 65 |
"\n",
|
| 66 |
-
"adapter_folder_name=\"adapters/qwen2.5_0.
|
| 67 |
"dataset_train_output_path = f\"{get_dataset_dir()}/llm_dataset_train.jsonl\"\n",
|
| 68 |
"dataset_val_output_path = f\"{get_dataset_dir()}/llm_dataset_val.jsonl\""
|
| 69 |
]
|
|
@@ -71,51 +84,71 @@
|
|
| 71 |
{
|
| 72 |
"cell_type": "markdown",
|
| 73 |
"id": "ba6031ad",
|
| 74 |
-
"metadata": {
|
|
| 75 |
"source": [
|
| 76 |
"# Load Data"
|
| 77 |
]
|
| 78 |
},
|
| 79 |
{
|
| 80 |
"cell_type": "code",
|
| 81 |
-
"execution_count":
|
| 82 |
"id": "e0912c9b",
|
| 83 |
"metadata": {},
|
| 84 |
-
"outputs": [
|
| 85 |
"source": [
|
| 86 |
"from sklearn.model_selection import train_test_split\n",
|
| 87 |
"\n",
|
| 88 |
"# Load data\n",
|
| 89 |
-
"df, dir = get_dataset()\n",
|
|
| 90 |
"\n",
|
| 91 |
-
"
|
| 92 |
-
"
|
| 93 |
"\n",
|
| 94 |
-
"
|
| 95 |
-
"
|
| 96 |
-
"
|
| 97 |
-
"
|
| 98 |
-
"
|
| 99 |
-
"
|
|
| 100 |
"\n",
|
| 101 |
-
"
|
| 102 |
-
"
|
| 103 |
-
" dataset_train_output_path
|
| 104 |
-
" format=\"jsonl_message\",\n",
|
| 105 |
-
" completion_mode=\"short\",\n",
|
| 106 |
-
" include_pos_rot=False\n",
|
| 107 |
-
")\n",
|
| 108 |
-
"\n",
|
| 109 |
-
"export_dataset(\n",
|
| 110 |
-
" df_val,\n",
|
| 111 |
" dataset_val_output_path,\n",
|
| 112 |
-
"
|
| 113 |
-
"
|
| 114 |
-
"
|
| 115 |
-
")
|
| 116 |
-
"\n",
|
| 117 |
-
"print(f\"Saved {len(df_train)} samples to {dataset_train_output_path}\")\n",
|
| 118 |
-
"print(f\"Saved {len(df_val)} samples to {dataset_val_output_path}\")"
|
| 119 |
]
|
| 120 |
},
|
| 121 |
{
|
|
@@ -151,7 +184,8 @@
|
|
| 151 |
]
|
| 152 |
},
|
| 153 |
"id": "2wUC3mmi5tYQ",
|
| 154 |
-
"outputId": "42ede229-d242-40f4-9b00-4adc953040ec"
|
|
| 155 |
},
|
| 156 |
"outputs": [],
|
| 157 |
"source": [
|
|
@@ -159,9 +193,12 @@
|
|
| 159 |
"import json\n",
|
| 160 |
"import torch\n",
|
| 161 |
"from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, DataCollatorForLanguageModeling\n",
|
| 162 |
-
"from datasets import load_dataset\n",
|
| 163 |
"from peft import LoraConfig, TaskType\n",
|
| 164 |
"from trl import SFTTrainer\n",
|
|
| 165 |
"\n",
|
| 166 |
"# Device detection (MPS/CUDA)\n",
|
| 167 |
"if torch.backends.mps.is_available():\n",
|
|
@@ -170,6 +207,9 @@
|
|
| 170 |
"elif torch.cuda.is_available():\n",
|
| 171 |
" device = torch.device(\"cuda\")\n",
|
| 172 |
" device_map = {\"\": \"cuda\"}\n",
|
|
| 173 |
"else:\n",
|
| 174 |
" device = torch.device(\"cpu\")\n",
|
| 175 |
" device_map = {\"\": \"cpu\"}\n",
|
|
@@ -183,8 +223,8 @@
|
|
| 183 |
")\n",
|
| 184 |
"\n",
|
| 185 |
"# Ensure padding token exists\n",
|
| 186 |
-
"
|
| 187 |
-
"
|
| 188 |
"\n",
|
| 189 |
"model = AutoModelForCausalLM.from_pretrained(\n",
|
| 190 |
" model_name,\n",
|
|
@@ -197,7 +237,7 @@
|
|
| 197 |
"lora_config = LoraConfig(\n",
|
| 198 |
" r=rank,\n",
|
| 199 |
" lora_alpha=alpha,\n",
|
| 200 |
-
" target_modules=[\"q_proj\", \"v_proj\"],\n",
|
| 201 |
" lora_dropout=dropout,\n",
|
| 202 |
" bias=\"none\",\n",
|
| 203 |
" task_type=TaskType.CAUSAL_LM,\n",
|
|
@@ -224,22 +264,34 @@
|
|
| 224 |
" text,\n",
|
| 225 |
" truncation=True,\n",
|
| 226 |
" padding=\"max_length\",\n",
|
| 227 |
-
" max_length=
|
| 228 |
" )\n",
|
| 229 |
" tokenized[\"labels\"] = tokenized[\"input_ids\"].copy()\n",
|
| 230 |
" return tokenized\n",
|
| 231 |
"\n",
|
| 232 |
"\n",
|
| 233 |
"# Tokenize train\n",
|
| 234 |
-
"
|
| 235 |
-
"
|
| 236 |
-
"
|
| 237 |
-
")\n",
|
|
| 238 |
"\n",
|
| 239 |
"# Training setup\n",
|
| 240 |
"training_args = TrainingArguments(\n",
|
| 241 |
" output_dir=adapter_folder_name,\n",
|
| 242 |
-
" per_device_train_batch_size=
|
|
| 243 |
" gradient_accumulation_steps=gradient_accumulation,\n",
|
| 244 |
" eval_accumulation_steps=eval_accumulation,\n",
|
| 245 |
" learning_rate=learning_rate,\n",
|
|
@@ -269,8 +321,20 @@
|
|
| 269 |
" data_collator=data_collator\n",
|
| 270 |
")\n",
|
| 271 |
"\n",
|
|
| 272 |
"# Train\n",
|
| 273 |
-
"
|
|
| 274 |
"\n",
|
| 275 |
"# Save LoRA adapter + tokenizer\n",
|
| 276 |
"trainer.model.save_pretrained(adapter_folder_name)\n",
|
|
@@ -280,7 +344,9 @@
|
|
| 280 |
{
|
| 281 |
"cell_type": "markdown",
|
| 282 |
"id": "cfd63a56",
|
| 283 |
-
"metadata": {
|
|
| 284 |
"source": [
|
| 285 |
"# Testing"
|
| 286 |
]
|
|
@@ -305,8 +371,16 @@
|
|
| 305 |
" torch_dtype=\"auto\"\n",
|
| 306 |
")\n",
|
| 307 |
"\n",
|
|
| 308 |
"# Load LoRA adapter\n",
|
| 309 |
-
"model = PeftModel.from_pretrained(model,
|
| 310 |
"\n",
|
| 311 |
"# Merge LoRA into the base model (optional if you want a standalone model)\n",
|
| 312 |
"model = model.merge_and_unload()\n",
|
|
@@ -337,7 +411,9 @@
|
|
| 337 |
{
|
| 338 |
"cell_type": "markdown",
|
| 339 |
"id": "05789a5e",
|
| 340 |
-
"metadata": {
|
|
| 341 |
"source": [
|
| 342 |
"# Save merged model - OPTIONAL"
|
| 343 |
]
|
|
@@ -383,7 +459,7 @@
|
|
| 383 |
"provenance": []
|
| 384 |
},
|
| 385 |
"kernelspec": {
|
| 386 |
-
"display_name": "
|
| 387 |
"language": "python",
|
| 388 |
"name": "python3"
|
| 389 |
},
|
|
@@ -397,7 +473,7 @@
|
|
| 397 |
"name": "python",
|
| 398 |
"nbconvert_exporter": "python",
|
| 399 |
"pygments_lexer": "ipython3",
|
| 400 |
-
"version": "3.10
|
| 401 |
},
|
| 402 |
"widgets": {
|
| 403 |
"application/vnd.jupyter.widget-state+json": {
|
|
| 29 |
},
|
| 30 |
{
|
| 31 |
"cell_type": "code",
|
| 32 |
+
"execution_count": 7,
|
| 33 |
"id": "360596e1",
|
| 34 |
"metadata": {},
|
| 35 |
+
"outputs": [
|
| 36 |
+
{
|
| 37 |
+
"name": "stdout",
|
| 38 |
+
"output_type": "stream",
|
| 39 |
+
"text": [
|
| 40 |
+
"Set GPU max to 80%\n"
|
| 41 |
+
]
|
| 42 |
+
}
|
| 43 |
+
],
|
| 44 |
"source": [
|
| 45 |
+
"import sys, os, torch\n",
|
| 46 |
"sys.path.append(os.path.abspath(\"..\"))\n",
|
| 47 |
"\n",
|
| 48 |
+
"from dataset_helper import export_dataset,sample_dataset, get_dataset, get_dataset_dir\n",
|
| 49 |
+
"\n",
|
| 50 |
+
"if torch.cuda.is_available():\n",
|
| 51 |
+
" print(\"Set GPU max to 80%\")\n",
|
| 52 |
+
" torch.cuda.set_per_process_memory_fraction(0.8, device=0)\n",
|
| 53 |
"\n",
|
| 54 |
"# Amount of dataset lines that will be compiled and converted to dataset.jsonl.\n",
|
| 55 |
"# If -1, use all lines.\n",
|
| 56 |
+
"# max_dataset=100_000\n",
|
| 57 |
+
"max_dataset=1_200_000 # Use all lines\n",
|
| 58 |
"train_validation_ratio=0.9\n",
|
| 59 |
"\n",
|
| 60 |
"# Training args\n",
|
| 61 |
+
"train_batches_per_device=2 # adjust based on GPU CUDA / MPS power. Using standard laptop RAM is suggested to set 1. Example (1,2,4,8)\n",
|
| 62 |
+
"val_batches_per_device=2 # adjust based on GPU CUDA / MPS power. Using standard laptop RAM is suggested to set 1. Example (1,2,4,8)\n",
|
| 63 |
+
"# batches_per_device=6\n",
|
| 64 |
+
"num_train_epoch=1 # num train 2-3 is enough\n",
|
| 65 |
+
"gradient_accumulation=2\n",
|
| 66 |
+
"eval_accumulation=2\n",
|
| 67 |
"learning_rate=5e-5\n",
|
| 68 |
+
"save_ratio=0.25 \n",
|
| 69 |
+
"log_ratio=0.1\n",
|
| 70 |
+
"eval_ratio=0.25 #ratio\n",
|
| 71 |
"\n",
|
| 72 |
"# LoRA\n",
|
| 73 |
"rank=32\n",
|
|
|
|
| 76 |
"\n",
|
| 77 |
"model_name = \"Qwen/Qwen2.5-0.5B-Instruct\"\n",
|
| 78 |
"\n",
|
| 79 |
+
"adapter_folder_name=\"adapters/qwen2.5_0.5b_lora_half\"\n",
|
| 80 |
"dataset_train_output_path = f\"{get_dataset_dir()}/llm_dataset_train.jsonl\"\n",
|
| 81 |
"dataset_val_output_path = f\"{get_dataset_dir()}/llm_dataset_val.jsonl\""
|
| 82 |
]
|
|
|
|
| 84 |
{
|
| 85 |
"cell_type": "markdown",
|
| 86 |
"id": "ba6031ad",
|
| 87 |
+
"metadata": {
|
| 88 |
+
"jp-MarkdownHeadingCollapsed": true
|
| 89 |
+
},
|
| 90 |
"source": [
|
| 91 |
"# Load Data"
|
| 92 |
]
|
| 93 |
},
|
| 94 |
{
|
| 95 |
"cell_type": "code",
|
| 96 |
+
"execution_count": 8,
|
| 97 |
"id": "e0912c9b",
|
| 98 |
"metadata": {},
|
| 99 |
+
"outputs": [
|
| 100 |
+
{
|
| 101 |
+
"name": "stdout",
|
| 102 |
+
"output_type": "stream",
|
| 103 |
+
"text": [
|
| 104 |
+
"Reading local dataset from: ../dataset\n",
|
| 105 |
+
"Merged shape: (2363180, 52)\n"
|
| 106 |
+
]
|
| 107 |
+
},
|
| 108 |
+
{
|
| 109 |
+
"name": "stderr",
|
| 110 |
+
"output_type": "stream",
|
| 111 |
+
"text": [
|
| 112 |
+
"100%|██████████| 1200000/1200000 [00:56<00:00, 21205.19it/s]\n"
|
| 113 |
+
]
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"name": "stdout",
|
| 117 |
+
"output_type": "stream",
|
| 118 |
+
"text": [
|
| 119 |
+
"1200000\n",
|
| 120 |
+
"1047340\n",
|
| 121 |
+
"Saved 942606 samples to ../dataset/llm_dataset_train.jsonl\n",
|
| 122 |
+
"Saved 104734 samples to ../dataset/llm_dataset_val.jsonl\n"
|
| 123 |
+
]
|
| 124 |
+
}
|
| 125 |
+
],
|
| 126 |
"source": [
|
| 127 |
"from sklearn.model_selection import train_test_split\n",
|
| 128 |
"\n",
|
| 129 |
"# Load data\n",
|
| 130 |
+
"df, dir = get_dataset(inside_arena=True)\n",
|
| 131 |
+
"temp_dir=f\"{get_dataset_dir()}/temp.jsonl\"\n",
|
| 132 |
"\n",
|
| 133 |
+
"df = df.sample(max_dataset)\n",
|
| 134 |
+
"export_dataset(df, temp_dir, format=\"state_action\", completion_mode=\"short\", include_pos_rot=False)\n",
|
| 135 |
"\n",
|
| 136 |
+
"action_ratio = {\n",
|
| 137 |
+
" \"TL\": 0.35,\n",
|
| 138 |
+
" \"TR\": 0.35,\n",
|
| 139 |
+
" \"FWD\": 0.2,\n",
|
| 140 |
+
" \"DS\": 0.05,\n",
|
| 141 |
+
" \"SK\": 0.05,\n",
|
| 142 |
+
"}\n",
|
| 143 |
"\n",
|
| 144 |
+
"sample_dataset(\n",
|
| 145 |
+
" temp_dir,\n",
|
| 146 |
+
" dataset_train_output_path, \n",
|
| 147 |
" dataset_val_output_path,\n",
|
| 148 |
+
" max_dataset,\n",
|
| 149 |
+
" action_ratio,\n",
|
| 150 |
+
" train_validation_ratio,\n",
|
| 151 |
+
" )"
|
|
|
| 152 |
]
|
| 153 |
},
|
| 154 |
{
|
|
|
|
| 184 |
]
|
| 185 |
},
|
| 186 |
"id": "2wUC3mmi5tYQ",
|
| 187 |
+
"outputId": "42ede229-d242-40f4-9b00-4adc953040ec",
|
| 188 |
+
"scrolled": true
|
| 189 |
},
|
| 190 |
"outputs": [],
|
| 191 |
"source": [
|
|
| 193 |
"import json\n",
|
| 194 |
"import torch\n",
|
| 195 |
"from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, DataCollatorForLanguageModeling\n",
|
| 196 |
+
"from datasets import load_dataset, load_from_disk\n",
|
| 197 |
"from peft import LoraConfig, TaskType\n",
|
| 198 |
"from trl import SFTTrainer\n",
|
| 199 |
+
"import gc\n",
|
| 200 |
+
"\n",
|
| 201 |
+
"gc.collect()\n",
|
| 202 |
"\n",
|
| 203 |
"# Device detection (MPS/CUDA)\n",
|
| 204 |
"if torch.backends.mps.is_available():\n",
|
|
|
| 207 |
"elif torch.cuda.is_available():\n",
|
| 208 |
" device = torch.device(\"cuda\")\n",
|
| 209 |
" device_map = {\"\": \"cuda\"}\n",
|
| 210 |
+
" with torch.no_grad():\n",
|
| 211 |
+
" torch.cuda.empty_cache()\n",
|
| 212 |
+
" torch.cuda.ipc_collect()\n",
|
| 213 |
"else:\n",
|
| 214 |
" device = torch.device(\"cpu\")\n",
|
| 215 |
" device_map = {\"\": \"cpu\"}\n",
|
|
|
| 223 |
")\n",
|
| 224 |
"\n",
|
| 225 |
"# Ensure padding token exists\n",
|
| 226 |
+
"tokenizer.pad_token = tokenizer.eos_token\n",
|
| 227 |
+
"tokenizer.padding_side = \"left\"\n",
|
| 228 |
"\n",
|
| 229 |
"model = AutoModelForCausalLM.from_pretrained(\n",
|
| 230 |
" model_name,\n",
|
|
| 237 |
"lora_config = LoraConfig(\n",
|
| 238 |
" r=rank,\n",
|
| 239 |
" lora_alpha=alpha,\n",
|
| 240 |
+
" target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"],\n",
|
| 241 |
" lora_dropout=dropout,\n",
|
| 242 |
" bias=\"none\",\n",
|
| 243 |
" task_type=TaskType.CAUSAL_LM,\n",
|
|
|
| 264 |
" text,\n",
|
| 265 |
" truncation=True,\n",
|
| 266 |
" padding=\"max_length\",\n",
|
| 267 |
+
" max_length=256\n",
|
| 268 |
" )\n",
|
| 269 |
" tokenized[\"labels\"] = tokenized[\"input_ids\"].copy()\n",
|
| 270 |
" return tokenized\n",
|
| 271 |
"\n",
|
| 272 |
"\n",
|
| 273 |
"# Tokenize train\n",
|
| 274 |
+
"tokenized_path = \"tokenized_dataset\"\n",
|
| 275 |
+
"\n",
|
| 276 |
+
"if os.path.exists(tokenized_path):\n",
|
| 277 |
+
" print(f\"📂 Loading existing tokenized dataset from: {tokenized_path}\")\n",
|
| 278 |
+
" tokenized_datasets = load_from_disk(tokenized_path)\n",
|
| 279 |
+
"else:\n",
|
| 280 |
+
" print(\"⚙️ Tokenizing dataset...\")\n",
|
| 281 |
+
" tokenized_datasets = dataset.map(\n",
|
| 282 |
+
" tokenize,\n",
|
| 283 |
+
" batched=False,\n",
|
| 284 |
+
" desc=\"Tokenizing dataset\"\n",
|
| 285 |
+
" )\n",
|
| 286 |
+
"\n",
|
| 287 |
+
" print(f\"💾 Saving tokenized dataset to: {tokenized_path}\")\n",
|
| 288 |
+
" tokenized_datasets.save_to_disk(tokenized_path)\n",
|
| 289 |
"\n",
|
| 290 |
"# Training setup\n",
|
| 291 |
"training_args = TrainingArguments(\n",
|
| 292 |
" output_dir=adapter_folder_name,\n",
|
| 293 |
+
" per_device_train_batch_size=train_batches_per_device,\n",
|
| 294 |
+
" per_device_eval_batch_size=val_batches_per_device,\n",
|
| 295 |
" gradient_accumulation_steps=gradient_accumulation,\n",
|
| 296 |
" eval_accumulation_steps=eval_accumulation,\n",
|
| 297 |
" learning_rate=learning_rate,\n",
|
|
| 321 |
" data_collator=data_collator\n",
|
| 322 |
")\n",
|
| 323 |
"\n",
|
| 324 |
+
"# Enable resume ckpt\n",
|
| 325 |
+
"latest_ckpt = None\n",
|
| 326 |
+
"if os.path.isdir(adapter_folder_name):\n",
|
| 327 |
+
" checkpoints = [os.path.join(adapter_folder_name, d) for d in os.listdir(adapter_folder_name) if d.startswith(\"checkpoint-\")]\n",
|
| 328 |
+
" if checkpoints:\n",
|
| 329 |
+
" latest_ckpt = max(checkpoints, key=lambda x: int(x.split(\"-\")[-1]))\n",
|
| 330 |
+
"\n",
|
| 331 |
+
"print(\"Resuming from:\", latest_ckpt)\n",
|
| 332 |
+
"\n",
|
| 333 |
"# Train\n",
|
| 334 |
+
"if latest_ckpt:\n",
|
| 335 |
+
" trainer.train(resume_from_checkpoint=latest_ckpt)\n",
|
| 336 |
+
"else:\n",
|
| 337 |
+
" trainer.train()\n",
|
| 338 |
"\n",
|
| 339 |
"# Save LoRA adapter + tokenizer\n",
|
| 340 |
"trainer.model.save_pretrained(adapter_folder_name)\n",
|
|
| 344 |
{
|
| 345 |
"cell_type": "markdown",
|
| 346 |
"id": "cfd63a56",
|
| 347 |
+
"metadata": {
|
| 348 |
+
"jp-MarkdownHeadingCollapsed": true
|
| 349 |
+
},
|
| 350 |
"source": [
|
| 351 |
"# Testing"
|
| 352 |
]
|
|
| 371 |
" torch_dtype=\"auto\"\n",
|
| 372 |
")\n",
|
| 373 |
"\n",
|
| 374 |
+
"latest_ckpt = None\n",
|
| 375 |
+
"if os.path.isdir(adapter_folder_name):\n",
|
| 376 |
+
" checkpoints = [os.path.join(adapter_folder_name, d) for d in os.listdir(adapter_folder_name) if d.startswith(\"checkpoint-\")]\n",
|
| 377 |
+
" if checkpoints:\n",
|
| 378 |
+
" latest_ckpt = max(checkpoints, key=lambda x: int(x.split(\"-\")[-1]))\n",
|
| 379 |
+
"\n",
|
| 380 |
+
"print(\"Resuming from:\", latest_ckpt)\n",
|
| 381 |
+
"\n",
|
| 382 |
"# Load LoRA adapter\n",
|
| 383 |
+
"model = PeftModel.from_pretrained(model, latest_ckpt)\n",
|
| 384 |
"\n",
|
| 385 |
"# Merge LoRA into the base model (optional if you want a standalone model)\n",
|
| 386 |
"model = model.merge_and_unload()\n",
|
|
| 411 |
{
|
| 412 |
"cell_type": "markdown",
|
| 413 |
"id": "05789a5e",
|
| 414 |
+
"metadata": {
|
| 415 |
+
"jp-MarkdownHeadingCollapsed": true
|
| 416 |
+
},
|
| 417 |
"source": [
|
| 418 |
"# Save merged model - OPTIONAL"
|
| 419 |
]
|
|
|
| 459 |
"provenance": []
|
| 460 |
},
|
| 461 |
"kernelspec": {
|
| 462 |
+
"display_name": "venv",
|
| 463 |
"language": "python",
|
| 464 |
"name": "python3"
|
| 465 |
},
|
|
| 473 |
"name": "python",
|
| 474 |
"nbconvert_exporter": "python",
|
| 475 |
"pygments_lexer": "ipython3",
|
| 476 |
+
"version": "3.12.10"
|
| 477 |
},
|
| 478 |
"widgets": {
|
| 479 |
"application/vnd.jupyter.widget-state+json": {
|
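One caveat in the testing cell above: latest_ckpt remains None when no checkpoint-* directory exists (for example after checkpoints have been cleaned up), and PeftModel.from_pretrained(model, None) then fails. A minimal guard, assuming the final adapter was saved to adapter_folder_name as in the training cell:

adapter_path = latest_ckpt if latest_ckpt is not None else adapter_folder_name
model = PeftModel.from_pretrained(model, adapter_path)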
llm/train_llm.py
ADDED
|
@@ -0,0 +1,321 @@
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
# coding: utf-8
|
| 3 |
+
|
| 4 |
+
import sys, os, torch
|
| 5 |
+
sys.path.append(os.path.abspath(".."))
|
| 6 |
+
|
| 7 |
+
from dataset_helper import export_dataset, get_dataset, get_dataset_dir
|
| 8 |
+
|
| 9 |
+
if torch.cuda.is_available():
|
| 10 |
+
print("Set GPU max to 80%")
|
| 11 |
+
    # torch.cuda.set_per_process_memory_fraction(0.8, device=0)  # cap currently disabled in this script version
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Amount of dataset lines that will be compiled and converted to dataset.jsonl.
|
| 15 |
+
max_dataset=1_200_000
|
| 16 |
+
train_validation_ratio=0.9
|
| 17 |
+
|
| 18 |
+
# Training args - FIXED FOR RAM CONSTRAINTS
|
| 19 |
+
train_batches_per_device=8 # REDUCED from 16 - critical for RAM
|
| 20 |
+
val_batches_per_device=8 # changed from 2
|
| 21 |
+
gradient_accumulation=8 # INCREASED to maintain effective batch size
|
| 22 |
+
eval_accumulation=8
|
| 23 |
+
num_train_epoch=1
|
| 24 |
+
learning_rate=5e-5
|
| 25 |
+
save_ratio=0.01
|
| 26 |
+
log_ratio=0.005
|
| 27 |
+
eval_ratio=0.25
|
| 28 |
+
|
| 29 |
+
# LoRA
|
| 30 |
+
rank=32
|
| 31 |
+
alpha=64
|
| 32 |
+
dropout=0.01
|
| 33 |
+
|
| 34 |
+
model_name = "Qwen/Qwen2.5-0.5B-Instruct"
|
| 35 |
+
|
| 36 |
+
adapter_folder_name="adapters/qwen2.5_0.5b_lora_half2"
|
| 37 |
+
dataset_train_output_path = f"{get_dataset_dir()}/llm_dataset_train.jsonl"
|
| 38 |
+
dataset_val_output_path = f"{get_dataset_dir()}/llm_dataset_val.jsonl"
|
| 39 |
+
# os.environ["OMP_NUM_THREADS"] = "8"
|
| 40 |
+
# os.environ["MKL_NUM_THREADS"] = "8"
|
| 41 |
+
# os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
|
| 42 |
+
|
| 43 |
+
from functools import partial
|
| 44 |
+
import json
|
| 45 |
+
import torch
|
| 46 |
+
from transformers import TrainerCallback, BitsAndBytesConfig, AutoTokenizer, AutoModelForCausalLM, TrainingArguments, DataCollatorForLanguageModeling
|
| 47 |
+
from datasets import load_dataset, load_from_disk
|
| 48 |
+
from peft import LoraConfig, TaskType
|
| 49 |
+
from trl import SFTTrainer
|
| 50 |
+
import gc
|
| 51 |
+
from torch.utils.data import DataLoader
|
| 52 |
+
|
| 53 |
+
# Device detection
|
| 54 |
+
if torch.backends.mps.is_available():
|
| 55 |
+
device = torch.device("mps")
|
| 56 |
+
device_map = {"": "mps"}
|
| 57 |
+
elif torch.cuda.is_available():
|
| 58 |
+
device = torch.device("cuda")
|
| 59 |
+
device_map = {"": device}
|
| 60 |
+
print(device_map)
|
| 61 |
+
else:
|
| 62 |
+
device = torch.device("cpu")
|
| 63 |
+
device_map = {"": "cpu"}
|
| 64 |
+
|
| 65 |
+
print(f"Using device: {device}")
|
| 66 |
+
|
| 67 |
+
# FIXED: Proper config for 4-bit quantization
|
| 68 |
+
# bnb_config = BitsAndBytesConfig(
|
| 69 |
+
# load_in_4bit=True,
|
| 70 |
+
# bnb_4bit_use_double_quant=True, # Changed to True for better memory efficiency
|
| 71 |
+
# bnb_4bit_quant_type="nf4",
|
| 72 |
+
# bnb_4bit_compute_dtype=torch.bfloat16,
|
| 73 |
+
# )
|
| 74 |
+
|
| 75 |
+
# Load Qwen2.5-0.5B model & tokenizer
|
| 76 |
+
tokenizer = AutoTokenizer.from_pretrained(
|
| 77 |
+
model_name,
|
| 78 |
+
trust_remote_code=True,
|
| 79 |
+
# use_fast=True
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
# Ensure padding token exists
|
| 83 |
+
if tokenizer.pad_token is None:
|
| 84 |
+
tokenizer.add_special_tokens({'pad_token': tokenizer.eos_token})
|
| 85 |
+
|
| 86 |
+
model = AutoModelForCausalLM.from_pretrained(
|
| 87 |
+
model_name,
|
| 88 |
+
# quantization_config=bnb_config,
|
| 89 |
+
device_map="auto",
|
| 90 |
+
torch_dtype=torch.bfloat16,
|
| 91 |
+
trust_remote_code=True
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
# Add LoRA adapter
|
| 95 |
+
lora_config = LoraConfig(
|
| 96 |
+
r=rank,
|
| 97 |
+
lora_alpha=alpha,
|
| 98 |
+
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
|
| 99 |
+
lora_dropout=dropout,
|
| 100 |
+
bias="none",
|
| 101 |
+
task_type=TaskType.CAUSAL_LM,
|
| 102 |
+
)
|
| 103 |
+
|
| 104 |
+
processed_path = "processed_sumobot_dataset"
|
| 105 |
+
|
| 106 |
+
def tokenize(example):
|
| 107 |
+
if "messages" in example:
|
| 108 |
+
# Chat-format dataset
|
| 109 |
+
text = tokenizer.apply_chat_template(
|
| 110 |
+
example["messages"],
|
| 111 |
+
tokenize=False,
|
| 112 |
+
add_generation_prompt=False
|
| 113 |
+
)
|
| 114 |
+
elif "text" in example:
|
| 115 |
+
# Plain-text dataset
|
| 116 |
+
text = example["text"]
|
| 117 |
+
elif "state" in example and "action" in example:
|
| 118 |
+
text = f"""<|im_start|>system
|
| 119 |
+
You are a Sumobot assistant.<|im_end|>
|
| 120 |
+
<|im_start|>user
|
| 121 |
+
sumobot state: {example['state']}<|im_end|>
|
| 122 |
+
<|im_start|>assistant
|
| 123 |
+
action: {example['action']}<|im_end|>"""
|
| 124 |
+
else:
|
| 125 |
+
raise ValueError("Example must contain 'messages', 'text', or 'state'+'action'")
|
| 126 |
+
|
| 127 |
+
# Tokenize with shorter max_length for smaller model
|
| 128 |
+
tokenized = tokenizer(
|
| 129 |
+
text,
|
| 130 |
+
truncation=True,
|
| 131 |
+
padding="max_length",
|
| 132 |
+
        max_length=256  # the short state/action prompts fit well within 256 tokens
|
| 133 |
+
)
|
| 134 |
+
tokenized["labels"] = tokenized["input_ids"].copy()
|
| 135 |
+
return tokenized
|
| 136 |
+
|
| 137 |
+
# IMPROVED: Better batching for tokenization
|
| 138 |
+
if os.path.exists(processed_path):
|
| 139 |
+
print(f"✅ Found cached dataset at {processed_path}, loading...")
|
| 140 |
+
tokenized_datasets = load_from_disk(processed_path)
|
| 141 |
+
else:
|
| 142 |
+
print("📊 Loading and tokenizing dataset...")
|
| 143 |
+
dataset = load_dataset(
|
| 144 |
+
"json",
|
| 145 |
+
data_files={
|
| 146 |
+
"train": dataset_train_output_path,
|
| 147 |
+
"val": dataset_val_output_path
|
| 148 |
+
}
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
# Tokenize with smaller batches to avoid memory spikes
|
| 152 |
+
tokenized_datasets = dataset.map(
|
| 153 |
+
tokenize,
|
| 154 |
+
batched=False,
|
| 155 |
+
load_from_cache_file=False,
|
| 156 |
+
# batch_size=100, # Reduced from 1000
|
| 157 |
+
# remove_columns=dataset["train"].column_names, # Remove original columns to save memory
|
| 158 |
+
)
|
| 159 |
+
|
| 160 |
+
print("💾 Saving tokenized dataset to disk...")
|
| 161 |
+
tokenized_datasets.save_to_disk(processed_path)
|
| 162 |
+
|
| 163 |
+
# Clear memory after saving
|
| 164 |
+
del dataset
|
| 165 |
+
gc.collect()
|
| 166 |
+
|
| 167 |
+
print(f"Train samples: {len(tokenized_datasets['train'])}")
|
| 168 |
+
print(f"Val samples: {len(tokenized_datasets['val'])}")
|
| 169 |
+
|
| 170 |
+
train_dataloader = DataLoader(
|
| 171 |
+
tokenized_datasets["train"],
|
| 172 |
+
batch_size=train_batches_per_device,
|
| 173 |
+
shuffle=True
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
train_length = len(train_dataloader)
|
| 177 |
+
|
| 178 |
+
train_num = (train_length // gradient_accumulation) * num_train_epoch
|
| 179 |
+
print(f"train length: {train_num}")
|
| 180 |
+
|
| 181 |
+
del train_dataloader
|
| 182 |
+
gc.collect()
|
| 183 |
+
|
| 184 |
+
# FIXED: Proper training arguments for your setup
|
| 185 |
+
training_args = TrainingArguments(
|
| 186 |
+
output_dir=adapter_folder_name,
|
| 187 |
+
|
| 188 |
+
# Batch sizes - CRITICAL FIX
|
| 189 |
+
per_device_train_batch_size=train_batches_per_device, # Small to save RAM
|
| 190 |
+
per_device_eval_batch_size=val_batches_per_device, # Small to save RAM
|
| 191 |
+
gradient_accumulation_steps=gradient_accumulation, # Increased to compensate
|
| 192 |
+
eval_accumulation_steps=eval_accumulation,
|
| 193 |
+
|
| 194 |
+
# Memory optimization - CRITICAL FIXES
|
| 195 |
+
# dataloader_num_workers=0, # No extra workers on Windows
|
| 196 |
+
# dataloader_pin_memory=False, # Disable pinned memory
|
| 197 |
+
gradient_checkpointing=True, # Enable checkpointing
|
| 198 |
+
|
| 199 |
+
# Precision - FIXED TO MATCH MODEL
|
| 200 |
+
fp16=False, # CHANGED: Don't use fp16 with 4-bit
|
| 201 |
+
bf16=True, # CHANGED: Use bf16 to match model
|
| 202 |
+
|
| 203 |
+
# Optimizer - Good choice for memory
|
| 204 |
+
# optim="paged_adamw_8bit", # CHANGED: 8-bit saves more memory than 32bit
|
| 205 |
+
|
| 206 |
+
# Training schedule
|
| 207 |
+
learning_rate=learning_rate,
|
| 208 |
+
num_train_epochs=num_train_epoch,
|
| 209 |
+
warmup_steps=100,
|
| 210 |
+
|
| 211 |
+
# Checkpointing & logging
|
| 212 |
+
save_strategy="steps",
|
| 213 |
+
save_steps=max(50,int(train_num * save_ratio)),
|
| 214 |
+
logging_strategy="steps",
|
| 215 |
+
logging_steps=max(10,int(train_num * log_ratio)),
|
| 216 |
+
logging_dir=f"{adapter_folder_name}/outputs",
|
| 217 |
+
eval_strategy="steps",
|
| 218 |
+
eval_steps=int(train_num * eval_ratio),
|
| 219 |
+
|
| 220 |
+
# Other
|
| 221 |
+
report_to="tensorboard",
|
| 222 |
+
dataloader_num_workers=0,
|
| 223 |
+
|
| 224 |
+
# Additional memory saving options
|
| 225 |
+
# save_total_limit=2, # Keep only 2 checkpoints
|
| 226 |
+
load_best_model_at_end=False, # Don't load model at end (saves memory)
|
| 227 |
+
)
|
| 228 |
+
|
| 229 |
+
data_collator = DataCollatorForLanguageModeling(
|
| 230 |
+
tokenizer=tokenizer,
|
| 231 |
+
mlm=False
|
| 232 |
+
)
|
| 233 |
+
|
| 234 |
+
# IMPORTANT: Disable cache for gradient checkpointing
|
| 235 |
+
model.config.use_cache = False
|
| 236 |
+
|
| 237 |
+
# Clear cache before training
|
| 238 |
+
gc.collect()
|
| 239 |
+
if torch.cuda.is_available():
|
| 240 |
+
torch.cuda.empty_cache()
|
| 241 |
+
torch.cuda.ipc_collect()
|
| 242 |
+
|
| 243 |
+
print(f"🔧 Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")
|
| 244 |
+
|
| 245 |
+
class CustomEarlyStopping(TrainerCallback):
|
| 246 |
+
def __init__(self, patience=5, min_delta=0.1):
|
| 247 |
+
self.patience = patience
|
| 248 |
+
self.min_delta = min_delta
|
| 249 |
+
self.best_loss = float("inf")
|
| 250 |
+
self.counter = 0
|
| 251 |
+
|
| 252 |
+
def on_evaluate(self, args, state, control, metrics=None, **kwargs):
|
| 253 |
+
val_loss = metrics.get("eval_loss")
|
| 254 |
+
if val_loss is None:
|
| 255 |
+
return control
|
| 256 |
+
|
| 257 |
+
if val_loss < self.best_loss - self.min_delta:
|
| 258 |
+
self.best_loss = val_loss
|
| 259 |
+
self.counter = 0
|
| 260 |
+
print(f"✅ Improved eval_loss to {val_loss:.4f}")
|
| 261 |
+
else:
|
| 262 |
+
self.counter += 1
|
| 263 |
+
print(f"⚠️ No significant improvement. Counter {self.counter}/{self.patience}")
|
| 264 |
+
|
| 265 |
+
if self.counter >= self.patience:
|
| 266 |
+
print("🛑 Early stopping triggered!")
|
| 267 |
+
control.should_training_stop = True
|
| 268 |
+
|
| 269 |
+
return control
|
| 270 |
+
|
| 271 |
+
# Memory monitoring callback
|
| 272 |
+
class MemoryMonitorCallback(TrainerCallback):
|
| 273 |
+
def on_step_end(self, args, state, control, **kwargs):
|
| 274 |
+
if state.global_step % 50 == 0:
|
| 275 |
+
import psutil
|
| 276 |
+
process = psutil.Process()
|
| 277 |
+
cpu_mem = process.memory_info().rss / 1e9
|
| 278 |
+
gpu_mem = torch.cuda.memory_allocated() / 1e9 if torch.cuda.is_available() else 0
|
| 279 |
+
print(f"📊 Step {state.global_step} | CPU RAM: {cpu_mem:.2f}GB | GPU RAM: {gpu_mem:.2f}GB")
|
| 280 |
+
return control
|
| 281 |
+
|
| 282 |
+
# Trainer
|
| 283 |
+
trainer = SFTTrainer(
|
| 284 |
+
model=model,
|
| 285 |
+
train_dataset=tokenized_datasets["train"],
|
| 286 |
+
eval_dataset=tokenized_datasets["val"],
|
| 287 |
+
peft_config=lora_config,
|
| 288 |
+
args=training_args,
|
| 289 |
+
data_collator=data_collator,
|
| 290 |
+
callbacks=[
|
| 291 |
+
CustomEarlyStopping(patience=10, min_delta=0.01),
|
| 292 |
+
MemoryMonitorCallback()
|
| 293 |
+
]
|
| 294 |
+
)
|
| 295 |
+
|
| 296 |
+
# Enable resume from checkpoint
|
| 297 |
+
latest_ckpt = None
|
| 298 |
+
if os.path.isdir(adapter_folder_name):
|
| 299 |
+
checkpoints = [os.path.join(adapter_folder_name, d) for d in os.listdir(adapter_folder_name) if d.startswith("checkpoint-")]
|
| 300 |
+
if checkpoints:
|
| 301 |
+
latest_ckpt = max(checkpoints, key=lambda x: int(x.split("-")[-1]))
|
| 302 |
+
print(f"📂 Resuming from: {latest_ckpt}")
|
| 303 |
+
|
| 304 |
+
# Train
|
| 305 |
+
print("🚀 Starting training...")
|
| 306 |
+
try:
|
| 307 |
+
if latest_ckpt:
|
| 308 |
+
trainer.train(resume_from_checkpoint=latest_ckpt)
|
| 309 |
+
else:
|
| 310 |
+
trainer.train()
|
| 311 |
+
|
| 312 |
+
# Save LoRA adapter + tokenizer
|
| 313 |
+
print("💾 Saving model...")
|
| 314 |
+
trainer.model.save_pretrained(adapter_folder_name)
|
| 315 |
+
tokenizer.save_pretrained(adapter_folder_name)
|
| 316 |
+
print("��� Training complete!")
|
| 317 |
+
|
| 318 |
+
except Exception as e:
|
| 319 |
+
print(f"❌ Training failed with error: {e}")
|
| 320 |
+
import traceback
|
| 321 |
+
traceback.print_exc()
|
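For reference, the effective batch and the step-derived schedule implied by the settings above, computed against the 942,606-sample train split reported by the notebook (plain arithmetic, not measured output):

per_device = 8                                      # train_batches_per_device
grad_accum = 8                                      # gradient_accumulation
effective_batch = per_device * grad_accum           # 64 samples per optimizer step
steps_per_epoch = 942_606 // effective_batch        # ~14,728 optimizer steps
save_every = max(50, int(steps_per_epoch * 0.01))   # save_ratio=0.01  -> every ~147 steps
log_every = max(10, int(steps_per_epoch * 0.005))   # log_ratio=0.005 -> every ~73 steps
print(effective_batch, steps_per_epoch, save_every, log_every)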
requirements.txt
CHANGED
|
@@ -1,17 +1,19 @@
|
|
| 1 |
-
datasets
|
| 2 |
-
huggingface_hub
|
| 3 |
-
joblib
|
| 4 |
-
numpy
|
| 5 |
-
onnx
|
| 6 |
-
onnxconverter_common
|
| 7 |
-
onnxruntime
|
| 8 |
-
pandas
|
| 9 |
-
peft
|
| 10 |
-
scikit_learn
|
| 11 |
-
tensorflow
|
| 12 |
-
tf2onnx
|
| 13 |
-
tokenizers
|
| 14 |
-
torch
|
| 15 |
-
tqdm
|
| 16 |
-
transformers
|
| 17 |
-
trl
|
|
| 1 |
+
datasets
|
| 2 |
+
huggingface_hub
|
| 3 |
+
joblib
|
| 4 |
+
numpy
|
| 5 |
+
onnx
|
| 6 |
+
onnxconverter_common
|
| 7 |
+
onnxruntime
|
| 8 |
+
pandas
|
| 9 |
+
peft
|
| 10 |
+
scikit_learn
|
| 11 |
+
tensorflow
|
| 12 |
+
tf2onnx
|
| 13 |
+
tokenizers
|
| 14 |
+
torch
|
| 15 |
+
tqdm
|
| 16 |
+
transformers
|
| 17 |
+
trl
|
| 18 |
+
tf-keras
|
| 19 |
+
bitsandbytes
|
slm/train_slm.ipynb
CHANGED
|
@@ -10,10 +10,18 @@
|
|
| 10 |
},
|
| 11 |
{
|
| 12 |
"cell_type": "code",
|
| 13 |
-
"execution_count":
|
| 14 |
"id": "8b2f9c4b",
|
| 15 |
"metadata": {},
|
| 16 |
-
"outputs": [
|
|
| 17 |
"source": [
|
| 18 |
"import sys, os\n",
|
| 19 |
"sys.path.append(os.path.abspath(\"..\"))\n",
|
|
@@ -25,8 +33,8 @@
|
|
| 25 |
"\n",
|
| 26 |
"# Amount of dataset lines that will be compiled and converted to dataset.jsonl. \n",
|
| 27 |
"# If -1, use all lines.\n",
|
| 28 |
-
"max_dataset=100\n",
|
| 29 |
-
"
|
| 30 |
"\n",
|
| 31 |
"# Model parameters\n",
|
| 32 |
"block_size = 128\n",
|
|
@@ -53,13 +61,132 @@
|
|
| 53 |
},
|
| 54 |
{
|
| 55 |
"cell_type": "code",
|
| 56 |
-
"execution_count":
|
| 57 |
"id": "71e14182",
|
| 58 |
"metadata": {},
|
| 59 |
-
"outputs": [
|
|
| 60 |
"source": [
|
| 61 |
"# Load data\n",
|
| 62 |
-
"df, dir = get_dataset(
|
| 63 |
"\n",
|
| 64 |
"if max_dataset>-1:\n",
|
| 65 |
" df = df.sample(max_dataset)\n",
|
|
@@ -393,7 +520,7 @@
|
|
| 393 |
],
|
| 394 |
"metadata": {
|
| 395 |
"kernelspec": {
|
| 396 |
-
"display_name": "
|
| 397 |
"language": "python",
|
| 398 |
"name": "python3"
|
| 399 |
},
|
|
@@ -407,7 +534,7 @@
|
|
| 407 |
"name": "python",
|
| 408 |
"nbconvert_exporter": "python",
|
| 409 |
"pygments_lexer": "ipython3",
|
| 410 |
-
"version": "3.10
|
| 411 |
}
|
| 412 |
},
|
| 413 |
"nbformat": 4,
|
|
| 10 |
},
|
| 11 |
{
|
| 12 |
"cell_type": "code",
|
| 13 |
+
"execution_count": 7,
|
| 14 |
"id": "8b2f9c4b",
|
| 15 |
"metadata": {},
|
| 16 |
+
"outputs": [
|
| 17 |
+
{
|
| 18 |
+
"name": "stdout",
|
| 19 |
+
"output_type": "stream",
|
| 20 |
+
"text": [
|
| 21 |
+
"curent dir slm\n"
|
| 22 |
+
]
|
| 23 |
+
}
|
| 24 |
+
],
|
| 25 |
"source": [
|
| 26 |
"import sys, os\n",
|
| 27 |
"sys.path.append(os.path.abspath(\"..\"))\n",
|
|
| 33 |
"\n",
|
| 34 |
"# Amount of dataset lines that will be compiled and converted to dataset.jsonl. \n",
|
| 35 |
"# If -1, use all lines.\n",
|
| 36 |
+
"# max_dataset=100\n",
|
| 37 |
+
"max_dataset=-1\n",
|
| 38 |
"\n",
|
| 39 |
"# Model parameters\n",
|
| 40 |
"block_size = 128\n",
|
|
| 61 |
},
|
| 62 |
{
|
| 63 |
"cell_type": "code",
|
| 64 |
+
"execution_count": 8,
|
| 65 |
"id": "71e14182",
|
| 66 |
"metadata": {},
|
| 67 |
+
"outputs": [
|
| 68 |
+
{
|
| 69 |
+
"name": "stdout",
|
| 70 |
+
"output_type": "stream",
|
| 71 |
+
"text": [
|
| 72 |
+
"curent dir slm\n",
|
| 73 |
+
"Reading local dataset from: ../dataset\n",
|
| 74 |
+
"No dataset found locally, will fetch from HuggingFace\n",
|
| 75 |
+
"Fetching dataset from HuggingFace repo: arbyazra123/sumobot_ml\n",
|
| 76 |
+
"Auto-detected 8 CSV files from HuggingFace.\n",
|
| 77 |
+
"dataset/game_logs_bt_vs_fsm.csv\n",
|
| 78 |
+
"dataset/game_logs_bt_vs_fsm_2.csv\n",
|
| 79 |
+
"dataset/game_logs_fsm_vs_bt.csv\n",
|
| 80 |
+
"dataset/game_logs_fsm_vs_fsm.csv\n",
|
| 81 |
+
"dataset/game_logs_mcts_vs_bt.csv\n",
|
| 82 |
+
"dataset/game_logs_mcts_vs_mcts.csv\n",
|
| 83 |
+
"dataset/game_logs_mcts_vs_primitive.csv\n",
|
| 84 |
+
"dataset/game_logs_primitive_vs_ga.csv\n",
|
| 85 |
+
"curent dir slm\n",
|
| 86 |
+
"../dataset\n",
|
| 87 |
+
"curent dir slm\n",
|
| 88 |
+
"Saved: ../dataset\\game_logs_bt_vs_fsm.csv\n",
|
| 89 |
+
"curent dir slm\n",
|
| 90 |
+
"../dataset\n",
|
| 91 |
+
"curent dir slm\n",
|
| 92 |
+
"Saved: ../dataset\\game_logs_bt_vs_fsm_2.csv\n",
|
| 93 |
+
"curent dir slm\n",
|
| 94 |
+
"../dataset\n",
|
| 95 |
+
"curent dir slm\n",
|
| 96 |
+
"Saved: ../dataset\\game_logs_fsm_vs_bt.csv\n",
|
| 97 |
+
"curent dir slm\n",
|
| 98 |
+
"../dataset\n",
|
| 99 |
+
"curent dir slm\n",
|
| 100 |
+
"Saved: ../dataset\\game_logs_fsm_vs_fsm.csv\n"
|
| 101 |
+
]
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"name": "stderr",
|
| 105 |
+
"output_type": "stream",
|
| 106 |
+
"text": [
|
| 107 |
+
"Xet Storage is enabled for this repo, but the 'hf_xet' package is not installed. Falling back to regular HTTP download. For better performance, install the package with: `pip install huggingface_hub[hf_xet]` or `pip install hf_xet`\n"
|
| 108 |
+
]
|
| 109 |
+
},
|
| 110 |
+
{
|
| 111 |
+
"name": "stdout",
|
| 112 |
+
"output_type": "stream",
|
| 113 |
+
"text": [
|
| 114 |
+
"curent dir slm\n",
|
| 115 |
+
"../dataset\n",
|
| 116 |
+
"curent dir slm\n",
|
| 117 |
+
"Saved: ../dataset\\game_logs_mcts_vs_bt.csv\n"
|
| 118 |
+
]
|
| 119 |
+
},
|
| 120 |
+
{
|
| 121 |
+
"name": "stderr",
|
| 122 |
+
"output_type": "stream",
|
| 123 |
+
"text": [
|
| 124 |
+
"Xet Storage is enabled for this repo, but the 'hf_xet' package is not installed. Falling back to regular HTTP download. For better performance, install the package with: `pip install huggingface_hub[hf_xet]` or `pip install hf_xet`\n"
|
| 125 |
+
]
|
| 126 |
+
},
|
| 127 |
+
{
|
| 128 |
+
"name": "stdout",
|
| 129 |
+
"output_type": "stream",
|
| 130 |
+
"text": [
|
| 131 |
+
"curent dir slm\n",
|
| 132 |
+
"../dataset\n",
|
| 133 |
+
"curent dir slm\n",
|
| 134 |
+
"Saved: ../dataset\\game_logs_mcts_vs_mcts.csv\n"
|
| 135 |
+
]
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"name": "stderr",
|
| 139 |
+
"output_type": "stream",
|
| 140 |
+
"text": [
|
| 141 |
+
"Xet Storage is enabled for this repo, but the 'hf_xet' package is not installed. Falling back to regular HTTP download. For better performance, install the package with: `pip install huggingface_hub[hf_xet]` or `pip install hf_xet`\n"
|
| 142 |
+
]
|
| 143 |
+
},
|
| 144 |
+
{
|
| 145 |
+
"name": "stdout",
|
| 146 |
+
"output_type": "stream",
|
| 147 |
+
"text": [
|
| 148 |
+
"curent dir slm\n",
|
| 149 |
+
"../dataset\n",
|
| 150 |
+
"curent dir slm\n",
|
| 151 |
+
"Saved: ../dataset\\game_logs_mcts_vs_primitive.csv\n"
|
| 152 |
+
]
|
| 153 |
+
},
|
| 154 |
+
{
|
| 155 |
+
"name": "stderr",
|
| 156 |
+
"output_type": "stream",
|
| 157 |
+
"text": [
|
| 158 |
+
"Xet Storage is enabled for this repo, but the 'hf_xet' package is not installed. Falling back to regular HTTP download. For better performance, install the package with: `pip install huggingface_hub[hf_xet]` or `pip install hf_xet`\n"
|
| 159 |
+
]
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"name": "stdout",
|
| 163 |
+
"output_type": "stream",
|
| 164 |
+
"text": [
|
| 165 |
+
"curent dir slm\n",
|
| 166 |
+
"../dataset\n",
|
| 167 |
+
"curent dir slm\n",
|
| 168 |
+
"Saved: ../dataset\\game_logs_primitive_vs_ga.csv\n",
|
| 169 |
+
"Merged shape: (2597381, 51)\n"
|
| 170 |
+
]
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"name": "stderr",
|
| 174 |
+
"output_type": "stream",
|
| 175 |
+
"text": [
|
| 176 |
+
"100%|██████████| 2597381/2597381 [02:05<00:00, 20618.22it/s]\n"
|
| 177 |
+
]
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"name": "stdout",
|
| 181 |
+
"output_type": "stream",
|
| 182 |
+
"text": [
|
| 183 |
+
"Saved 2597381 samples to ../dataset/slm_dataset.txt\n"
|
| 184 |
+
]
|
| 185 |
+
}
|
| 186 |
+
],
|
| 187 |
"source": [
|
| 188 |
"# Load data\n",
|
| 189 |
+
"df, dir = get_dataset()\n",
|
| 190 |
"\n",
|
| 191 |
"if max_dataset>-1:\n",
|
| 192 |
" df = df.sample(max_dataset)\n",
|
|
|
| 520 |
],
|
| 521 |
"metadata": {
|
| 522 |
"kernelspec": {
|
| 523 |
+
"display_name": "venv",
|
| 524 |
"language": "python",
|
| 525 |
"name": "python3"
|
| 526 |
},
|
|
| 534 |
"name": "python",
|
| 535 |
"nbconvert_exporter": "python",
|
| 536 |
"pygments_lexer": "ipython3",
|
| 537 |
+
"version": "3.12.10"
|
| 538 |
}
|
| 539 |
},
|
| 540 |
"nbformat": 4,
|
vdb/cleaned_dataset.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2e494f13953a7911097d7062490626fee1ac7a82bc6323b848836e28a4468845
|
| 3 |
+
size 312648863
|
vdb/dataset_cleaner.py
ADDED
|
@@ -0,0 +1,241 @@
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Clean and normalize actions in the dataset.
|
| 4 |
+
Removes inconsistent outputs and keeps only valid action formats.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import json
|
| 8 |
+
import re
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
|
| 11 |
+
def clean_action(action):
|
| 12 |
+
"""
|
| 13 |
+
Clean and normalize action string
|
| 14 |
+
|
| 15 |
+
Valid formats:
|
| 16 |
+
- TL (Turn Left) with optional value: TL, TL0.44
|
| 17 |
+
- TR (Turn Right) with optional value: TR, TR0.32
|
| 18 |
+
- FWD (Forward) with optional value: FWD, FWD0.38
|
| 19 |
+
- SK (Skip/Stay)
|
| 20 |
+
- DS (presumably another action)
|
| 21 |
+
|
| 22 |
+
Args:
|
| 23 |
+
action: Raw action string from LLM
|
| 24 |
+
|
| 25 |
+
Returns:
|
| 26 |
+
Cleaned action string or None if invalid
|
| 27 |
+
"""
|
| 28 |
+
if not action:
|
| 29 |
+
return None
|
| 30 |
+
|
| 31 |
+
# Remove extra whitespace
|
| 32 |
+
action = action.strip()
|
| 33 |
+
|
| 34 |
+
# Pattern to match valid actions
|
| 35 |
+
# Matches: TL, TR, FWD, SK, DS with optional number
|
| 36 |
+
valid_pattern = r'^(TL|TR|FWD|SK|DS)(\d*\.?\d*)$'
|
| 37 |
+
|
| 38 |
+
# Try to extract valid action
|
| 39 |
+
# Remove common LLM artifacts
|
| 40 |
+
action = action.split(',')[0] # Take first part if comma-separated
|
| 41 |
+
action = action.split(':')[0] # Remove score/explanation parts
|
| 42 |
+
action = action.split('score')[0] # Remove 'score' text
|
| 43 |
+
action = action.strip()
|
| 44 |
+
|
| 45 |
+
# Remove any extra text after the action
|
| 46 |
+
match = re.match(valid_pattern, action)
|
| 47 |
+
if match:
|
| 48 |
+
return action
|
| 49 |
+
|
| 50 |
+
# Try to find valid action within the string
|
| 51 |
+
# Look for TL, TR, FWD, SK, or DS at the start
|
| 52 |
+
for prefix in ['TL', 'TR', 'FWD', 'SK', 'DS']:
|
| 53 |
+
if action.startswith(prefix):
|
| 54 |
+
# Extract the prefix and any following number
|
| 55 |
+
match = re.match(rf'^({prefix})(\d*\.?\d*)', action)
|
| 56 |
+
if match:
|
| 57 |
+
return match.group(0)
|
| 58 |
+
|
| 59 |
+
# If nothing valid found, return None
|
| 60 |
+
return None
|
| 61 |
+
|
| 62 |
+
def clean_dataset(input_file, output_file, invalid_output_file=None):
|
| 63 |
+
"""
|
| 64 |
+
Clean dataset by normalizing actions
|
| 65 |
+
|
| 66 |
+
Args:
|
| 67 |
+
input_file: Input JSONL file
|
| 68 |
+
output_file: Output JSONL file with cleaned actions
|
| 69 |
+
invalid_output_file: Optional file to save invalid entries
|
| 70 |
+
"""
|
| 71 |
+
print("=" * 70)
|
| 72 |
+
print("Dataset Action Cleaner")
|
| 73 |
+
print("=" * 70)
|
| 74 |
+
print(f"Input: {input_file}")
|
| 75 |
+
print(f"Output: {output_file}")
|
| 76 |
+
if invalid_output_file:
|
| 77 |
+
print(f"Invalid entries: {invalid_output_file}")
|
| 78 |
+
print("=" * 70 + "\n")
|
| 79 |
+
|
| 80 |
+
# Count lines first
|
| 81 |
+
print("Counting lines...")
|
| 82 |
+
total_lines = 0
|
| 83 |
+
with open(input_file, 'r') as f:
|
| 84 |
+
for _ in f:
|
| 85 |
+
total_lines += 1
|
| 86 |
+
print(f"Total lines: {total_lines:,}\n")
|
| 87 |
+
|
| 88 |
+
# Process file
|
| 89 |
+
print("Cleaning actions...")
|
| 90 |
+
|
| 91 |
+
valid_count = 0
|
| 92 |
+
invalid_count = 0
|
| 93 |
+
skipped_count = 0
|
| 94 |
+
|
| 95 |
+
action_stats = {}
|
| 96 |
+
invalid_entries = []
|
| 97 |
+
|
| 98 |
+
with open(input_file, 'r') as infile, open(output_file, 'w') as outfile:
|
| 99 |
+
for line in tqdm(infile, total=total_lines, desc="Processing"):
|
| 100 |
+
line = line.strip()
|
| 101 |
+
if not line:
|
| 102 |
+
skipped_count += 1
|
| 103 |
+
continue
|
| 104 |
+
|
| 105 |
+
try:
|
| 106 |
+
data = json.loads(line)
|
| 107 |
+
state = data.get('state', '')
|
| 108 |
+
raw_action = data.get('action', '')
|
| 109 |
+
|
| 110 |
+
# Clean action
|
| 111 |
+
cleaned_action = clean_action(raw_action)
|
| 112 |
+
|
| 113 |
+
if cleaned_action:
|
| 114 |
+
# Valid action
|
| 115 |
+
result = {
|
| 116 |
+
"state": state,
|
| 117 |
+
"action": cleaned_action
|
| 118 |
+
}
|
| 119 |
+
outfile.write(json.dumps(result) + '\n')
|
| 120 |
+
valid_count += 1
|
| 121 |
+
|
| 122 |
+
# Track action statistics
|
| 123 |
+
action_prefix = re.match(r'^[A-Z]+', cleaned_action)
|
| 124 |
+
if action_prefix:
|
| 125 |
+
prefix = action_prefix.group(0)
|
| 126 |
+
action_stats[prefix] = action_stats.get(prefix, 0) + 1
|
| 127 |
+
else:
|
| 128 |
+
# Invalid action
|
| 129 |
+
invalid_count += 1
|
| 130 |
+
invalid_entries.append({
|
| 131 |
+
"state": state,
|
| 132 |
+
"original_action": raw_action,
|
| 133 |
+
"reason": "Could not parse valid action"
|
| 134 |
+
})
|
| 135 |
+
|
| 136 |
+
except json.JSONDecodeError:
|
| 137 |
+
skipped_count += 1
|
| 138 |
+
continue
|
| 139 |
+
except Exception as e:
|
| 140 |
+
skipped_count += 1
|
| 141 |
+
continue
|
| 142 |
+
|
| 143 |
+
# Save invalid entries if requested
|
| 144 |
+
if invalid_output_file and invalid_entries:
|
| 145 |
+
print(f"\nSaving invalid entries to {invalid_output_file}...")
|
| 146 |
+
with open(invalid_output_file, 'w') as f:
|
| 147 |
+
for entry in invalid_entries:
|
| 148 |
+
f.write(json.dumps(entry) + '\n')
|
| 149 |
+
|
| 150 |
+
# Print summary
|
| 151 |
+
print("\n" + "=" * 70)
|
| 152 |
+
print("Cleaning Summary")
|
| 153 |
+
print("=" * 70)
|
| 154 |
+
print(f"Total processed: {total_lines:,}")
|
| 155 |
+
print(f"Valid actions: {valid_count:,} ({100*valid_count/total_lines:.2f}%)")
|
| 156 |
+
print(f"Invalid actions: {invalid_count:,} ({100*invalid_count/total_lines:.2f}%)")
|
| 157 |
+
print(f"Skipped lines: {skipped_count:,}")
|
| 158 |
+
print("\nAction Statistics:")
|
| 159 |
+
for action, count in sorted(action_stats.items()):
|
| 160 |
+
print(f" {action}: {count:,} ({100*count/valid_count:.2f}%)")
|
| 161 |
+
print("=" * 70)
|
| 162 |
+
print(f"\n✓ Cleaned dataset saved to: {output_file}")
|
| 163 |
+
if invalid_output_file and invalid_entries:
|
| 164 |
+
print(f"✓ Invalid entries saved to: {invalid_output_file}")
|
| 165 |
+
print()
|
| 166 |
+
|
| 167 |
+
def preview_cleaning(input_file, num_samples=20):
|
| 168 |
+
"""
|
| 169 |
+
Preview what the cleaning will do without modifying files
|
| 170 |
+
|
| 171 |
+
Args:
|
| 172 |
+
input_file: Input file to preview
|
| 173 |
+
num_samples: Number of samples to show
|
| 174 |
+
"""
|
| 175 |
+
print("=" * 70)
|
| 176 |
+
print("Cleaning Preview")
|
| 177 |
+
print("=" * 70 + "\n")
|
| 178 |
+
|
| 179 |
+
samples_shown = 0
|
| 180 |
+
|
| 181 |
+
with open(input_file, 'r') as f:
|
| 182 |
+
for line in f:
|
| 183 |
+
if samples_shown >= num_samples:
|
| 184 |
+
break
|
| 185 |
+
|
| 186 |
+
line = line.strip()
|
| 187 |
+
if not line:
|
| 188 |
+
continue
|
| 189 |
+
|
| 190 |
+
try:
|
| 191 |
+
data = json.loads(line)
|
| 192 |
+
raw_action = data.get('action', '')
|
| 193 |
+
cleaned_action = clean_action(raw_action)
|
| 194 |
+
|
| 195 |
+
# Only show entries that will be changed or are invalid
|
| 196 |
+
if raw_action != cleaned_action:
|
| 197 |
+
print(f"Original: '{raw_action}'")
|
| 198 |
+
print(f"Cleaned: '{cleaned_action}'")
|
| 199 |
+
if cleaned_action:
|
| 200 |
+
print(f"Status: ✓ Valid")
|
| 201 |
+
else:
|
| 202 |
+
print(f"Status: ✗ Invalid (will be removed)")
|
| 203 |
+
print("-" * 70)
|
| 204 |
+
samples_shown += 1
|
| 205 |
+
|
| 206 |
+
            except Exception:
|
| 207 |
+
continue
|
| 208 |
+
|
| 209 |
+
print(f"\nShown {samples_shown} samples that will be modified/removed\n")
|
| 210 |
+
|
| 211 |
+
if __name__ == "__main__":
|
| 212 |
+
|
| 213 |
+
# ========================================
|
| 214 |
+
# CONFIGURATION
|
| 215 |
+
# ========================================
|
| 216 |
+
|
| 217 |
+
input_file = "../llm/complete_dataset.jsonl"
|
| 218 |
+
output_file = "cleaned_dataset.jsonl"
|
| 219 |
+
invalid_file = "invalid_actions.jsonl" # Set to None to skip saving
|
| 220 |
+
|
| 221 |
+
# ========================================
|
| 222 |
+
# PREVIEW MODE
|
| 223 |
+
# ========================================
|
| 224 |
+
|
| 225 |
+
print("\nDo you want to preview the cleaning first?")
|
| 226 |
+
response = input("Preview before cleaning? (y/n): ")
|
| 227 |
+
|
| 228 |
+
if response.lower() == 'y':
|
| 229 |
+
preview_cleaning(input_file, num_samples=20)
|
| 230 |
+
print("\nDo you want to proceed with the full cleaning?")
|
| 231 |
+
response = input("Clean the dataset? (y/n): ")
|
| 232 |
+
|
| 233 |
+
# ========================================
|
| 234 |
+
# CLEAN DATASET
|
| 235 |
+
# ========================================
|
| 236 |
+
|
| 237 |
+
if response.lower() == 'y':
|
| 238 |
+
clean_dataset(input_file, output_file, invalid_file)
|
| 239 |
+
print("✓ All done!")
|
| 240 |
+
else:
|
| 241 |
+
print("Cancelled.")
|
vdb/import_data.py
CHANGED
|
@@ -18,7 +18,7 @@ def encode_state(state_str):
|
|
| 18 |
], dtype=np.float32)
|
| 19 |
|
| 20 |
BATCH_SIZE = 5000
|
| 21 |
-
DATA_PATH = "
|
| 22 |
|
| 23 |
batch_vecs, batch_actions = [], []
|
| 24 |
with open(DATA_PATH, "r") as f:
|
|
| 18 |
], dtype=np.float32)
|
| 19 |
|
| 20 |
BATCH_SIZE = 5000
|
| 21 |
+
DATA_PATH = "cleaned_dataset.jsonl"
|
| 22 |
|
| 23 |
batch_vecs, batch_actions = [], []
|
| 24 |
with open(DATA_PATH, "r") as f:
|