| """ |
| Gemma 3 4B - Instruction Fine-Tuning for Classification |
| |
| Fine-tuning Gemma 3 4B with instruction format (QA style) for 9-class classification. |
| Uses RS-LoRA (Rank-Stabilized LoRA) to avoid overfitting. |
| |
| Features: |
| - Text preprocessing (remove names, tatweel, emojis) |
| - Instruction tuning format with few-shot examples |
| - RS-LoRA for efficient training |
| - BF16 training on A100 |
| |
| Usage: |
| python finetune_gemma3_classification.py |
| """ |
|
|
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"

import re
import json
import warnings

import pandas as pd
import torch
from datasets import load_dataset
from peft import LoraConfig, get_peft_model, TaskType
from tqdm import tqdm
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForSeq2Seq,
)

warnings.filterwarnings("ignore")


TRAIN_FILE = "/home/houssam-nojoom/.cache/huggingface/hub/datasets--houssamboukhalfa--telecom-ch1/snapshots/be06acac69aa411636dbe0e3bef5f0072e670765/train.csv"
TEST_FILE = "/home/houssam-nojoom/.cache/huggingface/hub/datasets--houssamboukhalfa--telecom-ch1/snapshots/be06acac69aa411636dbe0e3bef5f0072e670765/test_file.csv"
BASE_MODEL = "google/gemma-3-4b-it"

FT_OUTPUT_DIR = "./gemma3_classification_ft"

MAX_LENGTH = 2048
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Device: {DEVICE}")

# Enable TF32 matmul/conv kernels on Ampere+ GPUs for faster training
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

# Label maps (recorded in training_config.json for reference; the generative
# model predicts the literal class digit, not a classification-head index)
LABEL2ID = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7, 9: 8}
ID2LABEL = {v: k for k, v in LABEL2ID.items()}
NUM_LABELS = len(LABEL2ID)

# Text column in the CSV files
text_col = "Commentaire client"


SYSTEM_PROMPT = """You are an expert Algerian linguist and data labeler. Your task is to classify customer comments from Algérie Télécom's social media into one of 9 specific categories.

## CLASSES (DETAILED DESCRIPTIONS)
- **Class 1 (Wish/Positive Anticipation):** Comments expressing a wish, a hopeful anticipation, or general positive feedback/appreciation for future services or offers.
- **Class 2 (Complaint: Equipment/Supply):** Comments complaining about the lack, unavailability, or delay in the supply of necessary equipment (e.g., modems, fiber optic devices).
- **Class 3 (Complaint: Marketing/Advertising):** Comments criticizing advertisements, marketing campaigns, or their lack of realism/meaning.
- **Class 4 (Complaint: Installation/Deployment):** Comments about delays, stoppages, or failure in service installation, network expansion, or fiber optic deployment (e.g., digging issues).
- **Class 5 (Inquiry/Request for Information):** Comments asking for eligibility, connection dates, service status, coverage details, or specific contact information.
- **Class 6 (Complaint: Technical Support/Intervention):** Comments regarding delays in repair interventions, issues with technical staff competence, or unsatisfactory customer service agency visits.
- **Class 7 (Pricing/Service Enhancement):** Comments focused on pricing, requests for cost reduction, or suggestions for general service/app functionality enhancements.
- **Class 8 (Complaint: Total Service Outage/Disconnection):** Comments indicating a complete, sustained loss of service (e.g., no phone, no internet, total disconnection).
- **Class 9 (Complaint: Service Performance/Quality):** Comments about technical issues impacting performance (e.g., slow speed, high latency, broken website/portal, coverage claims).

Respond with ONLY the class number (1-9). Do not include any explanation."""


# Condensed few-shot reference (one or two examples per class). Note: the
# prompt built in format_few_shot_prompt() uses the longer FEW_SHOT_STRING
# below; this list is kept for reference.
FEW_SHOT_EXAMPLES = [
    # Class 1: wish / positive anticipation
    {"comment": "إن شاء الله يكون عرض صحاب 300 و 500 ميجا فيبر ياربي", "class": "1"},
    {"comment": "اتمنى لكم مزيد من التألق", "class": "1"},
    # Class 2: equipment/supply complaint
    {"comment": "زعما جابو المودام ؟", "class": "2"},
    {"comment": "وفرو أجهزة مودام الباقي ساهل !", "class": "2"},
    # Class 3: marketing/advertising complaint
    {"comment": "إشهار بدون معنه", "class": "3"},
    # Class 4: installation/deployment complaint
    {"comment": "المشروع متوقف منذ اشهر", "class": "4"},
    {"comment": "نتمنى تكملو في ايسطو وهران في اقرب وقت رانا نعانو مع ADSL", "class": "4"},
    # Class 5: inquiry/request for information
    {"comment": "modem", "class": "5"},
    {"comment": "يعني كي نطلعها ثلاثون ميغا كارطة تاع مائة الف قداه تحكملي؟", "class": "5"},
    # Class 6: technical support/intervention complaint
    {"comment": "عرض 20 ميجا نحيوه مدام مش قادرين تعطيونا حقنا", "class": "6"},
    # Class 7: pricing / service enhancement
    {"comment": "نقصوا الاسعار بزااااف غالية", "class": "7"},
    {"comment": "علاه ماديروش في التطبيق خاصية التوقيف المؤقت للانترانات", "class": "7"},
    # Class 8: total outage/disconnection complaint
    {"comment": "رانا بلا تلفون ولا انترنت", "class": "8"},
    {"comment": "ثلاثة اشهر بلا انترنت", "class": "8"},
    # Class 9: service performance/quality complaint
    {"comment": "فضاء الزبون علاه منقدروش نسجلو فيه", "class": "9"},
    {"comment": "هل موقع فضاء الزبون متوقف", "class": "9"},
]


def preprocess_text(text):
    """
    Preprocess a raw comment:
    - Remove URLs, emails, and phone numbers
    - Remove user mentions and the operator's page name
    - Remove Arabic tatweel (ـ) and emojis
    - Collapse repeated characters and normalize whitespace
    """
    if not isinstance(text, str):
        return ""

    # URLs
    text = re.sub(r'https?://\S+|www\.\S+', '', text)

    # Email addresses
    text = re.sub(r'\S+@\S+', '', text)

    # Phone numbers (generic international, then Algerian mobile and landline)
    text = re.sub(r'[\+]?[(]?[0-9]{1,4}[)]?[-\s\./0-9]{6,}', '', text)
    text = re.sub(r'\b0[567]\d{8}\b', '', text)
    text = re.sub(r'\b0[23]\d{7,8}\b', '', text)

    # User mentions (@handle)
    text = re.sub(r'@\w+', '', text)

    # Arabic tatweel (elongation character)
    text = re.sub(r'ـ+', '', text)

    # Emojis and pictographs (broad Unicode ranges, including the entire
    # supplementary planes U+10000-U+10FFFF)
    emoji_pattern = re.compile("["
        u"\U0001F600-\U0001F64F"
        u"\U0001F300-\U0001F5FF"
        u"\U0001F680-\U0001F6FF"
        u"\U0001F1E0-\U0001F1FF"
        u"\U00002702-\U000027B0"
        u"\U000024C2-\U0001F251"
        u"\U0001f926-\U0001f937"
        u"\U00010000-\U0010ffff"
        u"\u2640-\u2642"
        u"\u2600-\u2B55"
        u"\u200d"
        u"\u23cf"
        u"\u23e9"
        u"\u231a"
        u"\ufe0f"
        u"\u3030"
        "]+", flags=re.UNICODE)
    text = emoji_pattern.sub('', text)

    # The operator's page name (often quoted when replying)
    text = re.sub(r'Algérie Télécom - إتصالات الجزائر', '', text, flags=re.IGNORECASE)
    text = re.sub(r'Algérie Télécom', '', text, flags=re.IGNORECASE)
    text = re.sub(r'إتصالات الجزائر', '', text)

    # Collapse any character repeated 4+ times down to 3
    text = re.sub(r'(.)\1{3,}', r'\1\1\1', text)

    # Normalize whitespace
    text = re.sub(r'\s+', ' ', text).strip()

    return text
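
# Illustrative example (hypothetical input):
#   preprocess_text("شكرا Algérie Télécom ـــــ 😂😂 http://t.co/xyz @user01")
#   -> "شكرا"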
|
|
FEW_SHOT_STRING = """
Comment: إن شاء الله يكون عرض صحاب 300 و 500 ميجا فيبر ياربي
Class: 1

Comment: الف مبروووك..
Class: 1

Comment: - إتصالات الجزائر شكرا اتمنى لكم دوام الصحة والعافية
Class: 1

Comment: C une fierté de faire partie de cette grande entreprise Algérienne de haute technologie et haute qualité
Class: 1

Comment: اتمنى لكم مزيد من التألق
Class: 1

Comment: زعما جابو المودام ؟
Class: 2

Comment: وفرو أجهزة مودام الباقي ساهل !
Class: 2

Comment: واش الفايدة تع العرض هذا هو اصلا لي مودام مهوش متوفر رنا قريب عام وحنا ستناو في جد موام هذا
Class: 2

Comment: Depuis un an et demi qu'on a installé w ma kan walou
Class: 2

Comment: قتلتونا بلكذب المودام غير متوفر عندي 4 أشهر ملي حطيت الطلب في ولاية خنشلة و مزال ماجابوش المودام
Class: 2

Comment: عندكم احساس و لا شريوه كما قالو خوتنا لمصريين
Class: 3

Comment: Kamel Dahmane الفايبر؟ مستحيل كامل عاجبتهم
Class: 3

Comment: ههههه نخلص مليون عادي كون يركبونا الفيبر 😂😂😂😂😂 كرهنا من 144p
Class: 3

Comment: إشهار بدون معنه
Class: 3

Comment: المشروع متوقف منذ اشهر
Class: 4

Comment: نتمنى تكملو في ايسطو وهران في اقرب وقت رانا نعانو مع ADSL
Class: 4

Comment: Fibre كاش واحد وصلوله الفيبر؟
Class: 4

Comment: ما هو الجديد وانا مزال ماعنديش الفيبر رغم الطلب ولالحاح
Class: 4

Comment: علبة الفيبر راكبة في الحي و لكن لا يوجد توصيل للمنزل للان
Class: 4

Comment: modem
Class: 5

Comment: يعني كي نطلعها ثلاثون ميغا كارطة تاع مائة الف قداه تحكملي؟
Class: 5

Comment: سآل الأماكن لي ما فيهاش الألياف البصرية إذا جابولنا الألياف السرعة تكون محدودة كيما ف ADSL؟
Class: 5

Comment: ماعرف كاش خبر على ايدوم 4G ماعرف تبقى قرد العش
Class: 5

Comment: هل متوفرة في حي عدل 1046 مسكن دويرة
Class: 5

Comment: عرض 20 ميجا نحيوه مدام مش قادرين تعطيونا حقنا
Class: 6

Comment: 4 سنوات وحنا نخلصو فالدار ماشفنا حتى bonus
Class: 6

Comment: لماذا التغيير في الرقم بدون تغيير سرعة التدفق هل من أجل الإشهار وفقط انا غير من 50 ميغا إلا 200 ميغا نظريا تغيرت وفي الواقع بقت قياس أقل من 50 ميغا
Class: 6

Comment: انا طلعت تدفق انترنات من 15 الى 20 عبر تطبيق my idoom لاكن سرعة لم تتغير
Class: 6

Comment: نقصوا الاسعار بزااااف غالية
Class: 7

Comment: علاه ماديروش في التطبيق خاصية التوقيف المؤقت للانترانات
Class: 7

Comment: وفرونا من بعد اي ساهلة
Class: 7

Comment: لازم ترجعو اتصال بتطبيقات الدفع بلا انترنت و مجاني ريقلوها يا اتصالات الجزائر
Class: 7

Comment: Promotion fin d'année ADSL idoom
Class: 7

Comment: رانا بلا تلفون ولا انترنت
Class: 8

Comment: ثلاثة اشهر بلا انترنت
Class: 8

Comment: votre site espace client ne fonctionne pas pourquoi?
Class: 8

Comment: ما عندنا الانترنيت ما نخلصوها من الدار
Class: 8

Comment: مشكل في 1.200جيق فيبر مدام نوكيا مخرج الانترنت 1جيق فقط كفاش راح تحلو هذا مشكل ومشكل ثاني فضاء الزبون ميمشيش مندو شهر
Class: 8

Comment: فضاء الزبون علاه منقدروش نسجلو فيه
Class: 9

Comment: هل موقع فضاء الزبون متوقف
Class: 9

Comment: ماراهيش توصل الفاتورة لا عن طريق الإيميل ولا عن طريق فضاء الزبون
Class: 9

Comment: فضاء الزبون قرابة 20 يوم متوقف!!!!!!؟؟؟؟؟
Class: 9

Comment: برج الكيفان اظنها من العاصمة خارج تغطيتكم....احشموا بركاو بلا كذب....طلعنا الصواريخ للفضاء....بصح بالكذب....
Class: 9"""


def format_few_shot_prompt(comment):
    """Format the user prompt with few-shot examples for classification."""
    user_prompt = f"""Here are some examples of how to classify comments:

{FEW_SHOT_STRING}

Now classify this comment:
Comment: {comment}
Class:"""

    return user_prompt


def create_instruction_format(example, tokenizer, is_train=True):
    """
    Build a Gemma 3 chat-formatted string.

    For training, the assistant's answer (the class digit) is included;
    for inference, the prompt ends with the generation cue instead.
    The system prompt is prepended to the user turn rather than passed
    as a separate system message.
    """
    comment = preprocess_text(example.get(text_col, ""))

    messages = [
        {"role": "user", "content": SYSTEM_PROMPT + "\n\n" + format_few_shot_prompt(comment)}
    ]

    if is_train:
        label = example.get("Class", example.get("labels", 1))
        if isinstance(label, str):
            label = int(label.strip())
        messages.append({"role": "assistant", "content": str(label)})

        # Training: render the full conversation, including the answer
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=False,
        )
    else:
        # Inference: append the generation cue so the model continues
        # with the class digit
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )

    return text
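
# For reference, Gemma-style chat templates render roughly as:
#   <start_of_turn>user
#   {system prompt + few-shot prompt}<end_of_turn>
#   <start_of_turn>model
#   {class digit}<end_of_turn>        <- training only
# (the exact markup depends on the tokenizer's bundled template)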
|
|
def prepare_train_dataset(dataset, tokenizer):
    """Prepare the training dataset in instruction format.

    Loss is computed only on the assistant's response (the class digit),
    not on the prompt: label positions covering the prompt are set to -100,
    which cross-entropy ignores.
    """

    def process_example(example):
        # Full conversation (prompt + answer) and prompt-only variants
        full_text = create_instruction_format(example, tokenizer, is_train=True)
        prompt_text = create_instruction_format(example, tokenizer, is_train=False)

        full_tokenized = tokenizer(
            full_text,
            truncation=True,
            max_length=MAX_LENGTH,
            padding=False,
        )
        prompt_tokenized = tokenizer(
            prompt_text,
            truncation=True,
            max_length=MAX_LENGTH,
            padding=False,
        )

        # Mask out the prompt tokens; keep the answer tokens as labels.
        # This assumes the prompt's token ids are a prefix of the full
        # sequence's token ids.
        prompt_len = len(prompt_tokenized["input_ids"])
        labels = [-100] * prompt_len + full_tokenized["input_ids"][prompt_len:]

        # Guard against length drift from truncation or tokenization edges
        if len(labels) < len(full_tokenized["input_ids"]):
            labels = labels + full_tokenized["input_ids"][len(labels):]
        elif len(labels) > len(full_tokenized["input_ids"]):
            labels = labels[:len(full_tokenized["input_ids"])]

        full_tokenized["labels"] = labels
        return full_tokenized

    return dataset.map(process_example, remove_columns=dataset.column_names)
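
# Schematic of the resulting label alignment (token positions, not to scale):
#   input_ids: [ ...prompt tokens... | answer tokens | end-of-turn ]
#   labels:    [ -100 -100 ... -100  | answer tokens | end-of-turn ]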
|
|
# RS-LoRA adapter configuration (all attention and MLP projections)
RS_LORA_CONFIG = {
    "r": 64,
    "lora_alpha": 64,
    "lora_dropout": 0.05,
    "target_modules": [
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
    "use_rslora": True,
}
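
# With use_rslora=True, PEFT scales the adapter output by lora_alpha / sqrt(r)
# instead of lora_alpha / r. Here that is 64 / sqrt(64) = 8 versus 64 / 64 = 1,
# so the rank-64 update keeps a meaningful magnitude.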
|
|
FT_CONFIG = {
    "num_epochs": 3,
    "batch_size": 4,
    "gradient_accumulation_steps": 8,
    "learning_rate": 2e-4,
    "weight_decay": 0.01,
    "warmup_ratio": 0.1,
    "max_grad_norm": 1.0,
}
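
# Effective batch size = batch_size * gradient_accumulation_steps = 4 * 8 = 32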
|
|
| print("\n" + "="*70) |
| print("Gemma 3 4B - Instruction Fine-Tuning for Classification") |
| print("="*70 + "\n") |
|
|
| |
| print(f"Loading tokenizer from: {BASE_MODEL}") |
| tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True) |
|
|
| |
| if tokenizer.pad_token is None: |
| tokenizer.pad_token = tokenizer.eos_token |
| tokenizer.pad_token_id = tokenizer.eos_token_id |
|
|
| |
| tokenizer.padding_side = "right" |
|
|
| |
| print(f"Loading model from: {BASE_MODEL}") |
| model = AutoModelForCausalLM.from_pretrained( |
| BASE_MODEL, |
| torch_dtype=torch.bfloat16, |
| trust_remote_code=True, |
| device_map="auto", |
| attn_implementation="eager", |
| ) |
|
|
| |
| print("\nApplying RS-LoRA configuration...") |
| lora_config = LoraConfig( |
| task_type=TaskType.CAUSAL_LM, |
| r=RS_LORA_CONFIG["r"], |
| lora_alpha=RS_LORA_CONFIG["lora_alpha"], |
| lora_dropout=RS_LORA_CONFIG["lora_dropout"], |
| target_modules=RS_LORA_CONFIG["target_modules"], |
| bias="none", |
| use_rslora=RS_LORA_CONFIG["use_rslora"], |
| ) |
|
|
| model = get_peft_model(model, lora_config) |
| model.print_trainable_parameters() |
|
|
| |
| print(f"\nLoading training data from: {TRAIN_FILE}") |
| train_ds = load_dataset("csv", data_files=TRAIN_FILE, split="train") |
| print(f"Total training samples: {len(train_ds)}") |
|
|
| |
| print("\nPreprocessing text data...") |
| def preprocess_dataset(example): |
| example["clean_text"] = preprocess_text(example.get(text_col, "")) |
| return example |
|
|
| train_ds = train_ds.map(preprocess_dataset) |
|
|
| |
| print("\nPreprocessing examples:") |
| for i in range(min(3, len(train_ds))): |
| original = train_ds[i].get(text_col, "")[:80] |
| cleaned = train_ds[i].get("clean_text", "")[:80] |
| print(f" Original: {original}...") |
| print(f" Cleaned: {cleaned}...") |
| print() |
|
|
| |
# Hold out 1% of the training data for loss-based checkpoint selection
split = train_ds.train_test_split(test_size=0.01, seed=42)
train_split = split["train"]
eval_split = split["test"]
print(f"Train split: {len(train_split)} | Eval split: {len(eval_split)}")

| print("\nPreparing instruction-formatted datasets...") |
| train_dataset = prepare_train_dataset(train_split, tokenizer) |
| eval_dataset = prepare_train_dataset(eval_split, tokenizer) |
|
|
| |
| print("\nExample formatted input (truncated):") |
| example_text = create_instruction_format(train_split[0], tokenizer, is_train=True) |
| print(example_text[:500] + "..." if len(example_text) > 500 else example_text) |
|
|
| |
data_collator = DataCollatorForSeq2Seq(
    tokenizer=tokenizer,
    padding=True,
    return_tensors="pt",
)
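
# DataCollatorForSeq2Seq pads "labels" as well as "input_ids", using -100
# (its default label_pad_token_id) so padded positions are ignored by the loss.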
|
|
| print("\n--- Fine-Tuning Hyperparameters ---") |
| for k, v in FT_CONFIG.items(): |
| print(f" {k}: {v}") |
| print(f"\n--- RS-LoRA Configuration ---") |
| print(f" rank: {RS_LORA_CONFIG['r']}") |
| print(f" alpha: {RS_LORA_CONFIG['lora_alpha']}") |
| print(f" dropout: {RS_LORA_CONFIG['lora_dropout']}") |
| print(f" use_rslora: {RS_LORA_CONFIG['use_rslora']}") |
|
|
training_args = TrainingArguments(
    output_dir=FT_OUTPUT_DIR,
    num_train_epochs=FT_CONFIG["num_epochs"],
    per_device_train_batch_size=FT_CONFIG["batch_size"],
    per_device_eval_batch_size=FT_CONFIG["batch_size"],
    gradient_accumulation_steps=FT_CONFIG["gradient_accumulation_steps"],
    learning_rate=FT_CONFIG["learning_rate"],
    weight_decay=FT_CONFIG["weight_decay"],
    warmup_ratio=FT_CONFIG["warmup_ratio"],
    max_grad_norm=FT_CONFIG["max_grad_norm"],
    bf16=True,
    logging_steps=10,
    eval_strategy="epoch",
    save_strategy="epoch",
    save_total_limit=2,
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
    greater_is_better=False,
    dataloader_num_workers=4,
    report_to="none",
    gradient_checkpointing=True,
    gradient_checkpointing_kwargs={"use_reentrant": False},
)
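
# Non-reentrant checkpointing (use_reentrant=False) is the variant that works
# reliably when most parameters are frozen, as with LoRA adapters.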
|
|
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    tokenizer=tokenizer,
    data_collator=data_collator,
)
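
# Note: newer transformers releases rename Trainer's `tokenizer` argument to
# `processing_class`; `tokenizer=` still works where it is only deprecated.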
|
|
| print("\nStarting fine-tuning...") |
| trainer.train() |
|
|
| print(f"\nSaving model to: {FT_OUTPUT_DIR}") |
| trainer.save_model(FT_OUTPUT_DIR) |
| tokenizer.save_pretrained(FT_OUTPUT_DIR) |
|
|
| |
# Persist the run configuration next to the adapter weights
config = {
    "base_model": BASE_MODEL,
    "num_labels": NUM_LABELS,
    "id2label": ID2LABEL,
    "label2id": LABEL2ID,
    "rs_lora_config": RS_LORA_CONFIG,
    "ft_config": FT_CONFIG,
}
with open(os.path.join(FT_OUTPUT_DIR, "training_config.json"), "w") as f:
    json.dump(config, f, indent=2)
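
# Caveat: json.dump converts the integer keys of id2label/label2id to strings,
# so cast them back to int when reloading this file.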
|
|
| print("\n" + "="*70) |
| print("Inference on Test Set") |
| print("="*70 + "\n") |
|
|
| |
test_ds = load_dataset("csv", data_files=TEST_FILE, split="train")
print(f"Test samples: {len(test_ds)}")

# Apply the same text preprocessing used for training
test_ds = test_ds.map(preprocess_dataset)

| print("Running inference...") |
| model.eval() |
|
|
| all_preds = [] |
| batch_size = 1 |
|
|
| from tqdm import tqdm |
|
|
for i in tqdm(range(len(test_ds)), desc="Predicting"):
    example = test_ds[i]

    # Build the inference prompt (no answer, generation cue appended)
    prompt = create_instruction_format(example, tokenizer, is_train=False)

    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=MAX_LENGTH)
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    # Greedy decoding; the class digit needs only a few new tokens
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=5,
            do_sample=False,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens
    generated_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()

    # Extract the first standalone digit 1-9; default to class 1 if none found
    match = re.search(r'\b([1-9])\b', generated_text)
    pred_class = int(match.group(1)) if match else 1

    all_preds.append(pred_class)

test_df = pd.read_csv(TEST_FILE)
test_df["Predicted_Class"] = all_preds

output_file = "test_predictions_gemma3.csv"
test_df.to_csv(output_file, index=False)
print(f"\nPredictions saved to: {output_file}")

| print("\nSample predictions:") |
| for i in range(min(10, len(test_df))): |
| text = str(test_df.iloc[i][text_col])[:60] + "..." if len(str(test_df.iloc[i][text_col])) > 60 else str(test_df.iloc[i][text_col]) |
| pred = test_df.iloc[i]["Predicted_Class"] |
| print(f" [{i+1}] Class {pred}: {text}") |
|
|
| |
| print("\nPrediction distribution:") |
| pred_counts = test_df["Predicted_Class"].value_counts().sort_index() |
| for class_label, count in pred_counts.items(): |
| print(f" Class {class_label}: {count} samples ({count/len(test_df)*100:.1f}%)") |
|
|
| |
| |
| |
| print("\n" + "="*70) |
| print("TRAINING COMPLETE!") |
| print("="*70) |
| print(f"\nBase Model: {BASE_MODEL}") |
| print(f"Fine-tuned model saved to: {FT_OUTPUT_DIR}") |
| print(f"Predictions saved to: {output_file}") |
| print(f"\nTraining samples: {len(train_split)}") |
| print(f"Validation samples: {len(eval_split)}") |
| print(f"Test samples: {len(test_df)}") |
| print(f"RS-LoRA rank: {RS_LORA_CONFIG['r']}") |
| print(f"Use RS-LoRA: {RS_LORA_CONFIG['use_rslora']}") |
|
|