# SillokBert-Scratch Project, Step 5: Baseline Model Evaluation
# -----------------------------------------------------------------
# Computes the Perplexity (PPL) of the general-purpose BERT model
# (bert-base-multilingual-cased) used as-is, without any further
# training.
# -----------------------------------------------------------------

import math
from itertools import chain
from pathlib import Path

from datasets import Dataset, load_dataset
from transformers import (
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)


def evaluate_baseline_bert():
    """Evaluates the performance (Perplexity) of bert-base-multilingual-cased."""

    # --- Model and path configuration ---
    baseline_model_name = "bert-base-multilingual-cased"
    test_dataset_file = "/home/work/baro/sillok25060103/preprocessed_corpus/test.txt"
    eval_output_dir = Path("./baseline_evaluation_output")

    print("--- 5. Baseline BERT Model Evaluation ---")

    # --- Load model and tokenizer ---
    model = AutoModelForMaskedLM.from_pretrained(baseline_model_name)
    tokenizer = AutoTokenizer.from_pretrained(baseline_model_name)

    # --- Prepare the dataset ---
    test_dataset = load_dataset("text", data_files={"test": test_dataset_file})

    block_size = 512  # BERT's maximum sequence length

    def tokenize_function(examples):
        # Special tokens are omitted because the corpus is concatenated
        # and re-chunked into fixed-size blocks below.
        return tokenizer(examples["text"], add_special_tokens=False)

    tokenized_datasets = test_dataset.map(
        tokenize_function, batched=True, num_proc=4, remove_columns=["text"]
    )

    # Concatenate every tokenized line, then split into block_size chunks,
    # dropping the trailing remainder that does not fill a full block.
    all_input_ids = list(chain(*tokenized_datasets["test"]["input_ids"]))
    total_length = (len(all_input_ids) // block_size) * block_size
    grouped_input_ids = [
        all_input_ids[i : i + block_size] for i in range(0, total_length, block_size)
    ]

    eval_dataset = Dataset.from_dict({"input_ids": grouped_input_ids})
    print(f"Created {len(eval_dataset)} evaluation samples.")

    # --- Run the evaluation ---
    # mlm=True is required here: BERT is a masked language model, and with
    # mlm=False the collator would label every visible token with itself,
    # which a bidirectional encoder predicts trivially. The 15% masking
    # probability matches BERT's pre-training objective.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm=True, mlm_probability=0.15
    )
    training_args = TrainingArguments(
        output_dir=str(eval_output_dir),
        per_device_eval_batch_size=8,
        fp16=True,  # requires a CUDA device
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        eval_dataset=eval_dataset,
    )

    metrics = trainer.evaluate()
    eval_loss = metrics["eval_loss"]
    try:
        perplexity = math.exp(eval_loss)
    except OverflowError:
        perplexity = float("inf")

    # --- Report final results ---
    print(f"\n--- {baseline_model_name} Evaluation Results ---")
    print(f" - Final Eval Loss: {eval_loss:.4f}")
    print(f" - Final Perplexity (PPL): {perplexity:.4f}")
    print("-" * 40)


if __name__ == "__main__":
    evaluate_baseline_bert()
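

# -----------------------------------------------------------------
# Optional sanity check (an illustrative sketch, not part of the
# original pipeline). Perplexity is exp() of the mean masked-LM
# cross-entropy, so the Trainer's figure can be cross-checked on a
# single collated batch. `n_samples=8` is an arbitrary choice, and
# because masking is random this single-batch estimate will be noisy.
# -----------------------------------------------------------------
import torch


def spot_check_perplexity(model, tokenizer, eval_dataset, n_samples=8):
    """Re-derives PPL = exp(loss) on one batch, outside the Trainer."""
    collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm=True, mlm_probability=0.15
    )
    batch = collator([eval_dataset[i] for i in range(n_samples)])
    model.eval()
    with torch.no_grad():
        # model(...).loss is the mean cross-entropy over masked positions.
        loss = model(**batch).loss
    return math.exp(loss.item())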