# Import unsloth before transformers so Unsloth's patches are applied first.
from unsloth import FastLanguageModel, is_bfloat16_supported

from datasets import load_dataset, concatenate_datasets
from transformers import TrainingArguments
from trl import SFTTrainer
|
|
max_seq_length = 512  # maximum sequence length used for training
dtype = None          # None lets Unsloth pick bfloat16 if supported, else float16
load_in_4bit = True   # load the base model 4-bit quantized (QLoRA-style)
|
|
model_id = "llm-jp/llm-jp-3-13b"
new_model_id = "llm-jp-3-13b-it"

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_id,
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    trust_remote_code=True,
)
|
|
# Attach LoRA adapters to the attention and MLP projection layers.
model = FastLanguageModel.get_peft_model(
    model,
    r=32,                                  # LoRA rank
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    use_gradient_checkpointing="unsloth",  # Unsloth's memory-efficient checkpointing
    random_state=3407,
    use_rslora=False,
    loftq_config=None,
    max_seq_length=max_seq_length,
)
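# Optional sanity check: assuming get_peft_model returns a standard PEFT model,
# it exposes print_trainable_parameters(). With r=32 on the seven projection
# matrices, only a small fraction of the 13B parameters should be trainable.
model.print_trainable_parameters()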
|
|
|
|
datasets_list = [
    "/home/knishizawa/Matsuo_AI/LLM_Course2024/Distribution20241221_all/ichikara-instruction-003-001-1.json",
    "/home/knishizawa/Matsuo_AI/LLM_Course2024/Distribution20241221_all/ichikara-instruction-003-001-2.1.json",
    "/home/knishizawa/Matsuo_AI/LLM_Course2024/Distribution20241221_all/ichikara-instruction-003-001-2.2.json",
    "/home/knishizawa/Matsuo_AI/LLM_Course2024/Distribution20241221_all/ichikara-instruction-003-001-5.1.json",
    "/home/knishizawa/Matsuo_AI/LLM_Course2024/Distribution20241221_all/ichikara-instruction-003-001-5.2.json",
    "/home/knishizawa/Matsuo_AI/LLM_Course2024/Distribution20241221_all/ichikara-instruction-003-002-1.json",
    "/home/knishizawa/Matsuo_AI/LLM_Course2024/Distribution20241221_all/ichikara-instruction-003-003-1.json",
]
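# Optional: the paths above are machine-specific, so fail fast on missing files
# rather than inside load_dataset (plain standard-library check).
import os
missing = [p for p in datasets_list if not os.path.exists(p)]
if missing:
    print(f"Warning: {len(missing)} dataset file(s) not found: {missing}")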
|
|
valid_datasets = []
|
|
# Training template: "### 指示" marks the instruction, "### 回答" the answer.
# The same format must be reproduced at inference time.
prompt = """### 指示
{}
### 回答
{}"""
EOS_TOKEN = tokenizer.eos_token
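# Appending EOS_TOKEN to every training example teaches the model to emit EOS
# at the end of an answer; without it, generations tend to run on indefinitely.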
|
|
def formatting_prompts_func(examples):
    """Render one record into the instruction/response template, ending with EOS."""
    input_text = examples["text"]
    output_text = examples["output"]
    text = prompt.format(input_text, output_text) + EOS_TOKEN
    return {"formatted_text": text}
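# For a record like {"text": "2+2は?", "output": "4です。"} (hypothetical example),
# formatted_text becomes:
# ### 指示
# 2+2は?
# ### 回答
# 4です。</s>
# (the trailing EOS string depends on the tokenizer; "</s>" is only illustrative).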
|
|
for file in datasets_list:
    try:
        dataset = load_dataset("json", data_files=file, split="train")
        dataset = dataset.map(formatting_prompts_func, num_proc=4)
        valid_datasets.append(dataset)
        print(f"Loaded: {file} - {len(dataset)} records")
        # Spot-check one formatted example.
        print(dataset[3]["formatted_text"])
    except Exception as e:
        print(f"Error: {file} - {e}")
|
|
if valid_datasets:
    merged_dataset = concatenate_datasets(valid_datasets)
    if len(merged_dataset) > 0:
        save_dir = "/home/knishizawa/Matsuo_AI/LLM_Course2024/merged_dataset"
        merged_dataset.save_to_disk(save_dir)
        print(f"Merged dataset saved to {save_dir}.")
    else:
        print("Merged dataset is empty.")
else:
    print("No valid datasets were found.")
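# The saved dataset can be restored later without re-parsing the JSON files,
# using the standard counterpart to save_to_disk:
# from datasets import load_from_disk
# merged_dataset = load_from_disk(save_dir)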
|
|
|
|
trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=merged_dataset,
    max_seq_length=max_seq_length,
    dataset_text_field="formatted_text",
    packing=False,
    args=TrainingArguments(
        per_device_train_batch_size=2,
        gradient_accumulation_steps=4,
        num_train_epochs=1,
        logging_steps=10,
        warmup_steps=10,
        save_steps=100,
        save_total_limit=2,
        max_steps=-1,  # -1 defers to num_train_epochs
        learning_rate=2e-4,
        fp16=not is_bfloat16_supported(),  # fp16 fallback on GPUs without bf16
        bf16=is_bfloat16_supported(),
        group_by_length=True,
        seed=3407,
        output_dir="outputs",
        report_to="none",
    ),
)
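# Effective batch size is per_device_train_batch_size * gradient_accumulation_steps
# = 2 * 4 = 8 sequences per optimizer step (per device).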
|
|
trainer_stats = trainer.train()

save_dir = "./saved_model"
model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)
print(f"Model saved to {save_dir}.")
|
|
|
|
|
|
# Inference on the elyza-tasks-100-TV evaluation set.
import json

# Accumulate lines until a JSON object closes, so the parser also tolerates
# objects pretty-printed across multiple lines.
tasks = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        if item.endswith("}"):
            tasks.append(json.loads(item))
            item = ""
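# If the file is strict JSONL (exactly one object per line), the loop above is
# equivalent to this simpler form (assumes no pretty-printed multi-line objects):
# with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
#     tasks = [json.loads(line) for line in f if line.strip()]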
|
|
from tqdm import tqdm
|
|
# Switch Unsloth into its optimized inference mode before generation.
FastLanguageModel.for_inference(model)
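# Decoding below is greedy (do_sample=False) with repetition_penalty=1.2,
# i.e. deterministic outputs with a mild guard against repetition loops.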
|
results = []
for dt in tqdm(tasks):
    input_text = dt["input"]

    # Same template as training, with the answer section left empty.
    prompt = f"""### 指示\n{input_text}\n### 回答\n"""

    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=512, use_cache=True,
                             do_sample=False, repetition_penalty=1.2)
    # Keep only the text generated after the final "### 回答" marker.
    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]

    results.append({"task_id": dt["task_id"], "input": input_text, "output": prediction})
|
|
with open(f"{new_model_id}_output.jsonl", "w", encoding="utf-8") as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)
        f.write("\n")
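# The result is standard JSONL: one {"task_id", "input", "output"} object per
# line, which is the format evaluation scripts typically consume.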
|
|
|
|