# NOTE(review): the three lines below were scraper artifacts from a web code
# listing (file-size header, commit-hash gutter, line-number gutter). They are
# not Python and would raise a SyntaxError; preserved here as comments.
# File size: 2,995 Bytes | commit gutter: 64382c6 / 37b0b2a | line-number gutter 1-91
import torch

from datasets import load_dataset
from unsloth import FastLanguageModel, UnslothTrainer, UnslothTrainingArguments, unsloth_train
# Source data: a local JSON file of {"input": English, "output": colloquial Tamil} records.
file_path = "/content/debug_divas_dataset.json"  # adjust if the file lives elsewhere
dataset = load_dataset(
    "json",
    data_files=file_path,
)
# Base checkpoint: an instruction-tuned Mistral 7B served through Unsloth,
# suitable for instruction-style translation prompts.
model_name = "unsloth/mistral-7b-instruct"
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_name,
    max_seq_length=128,
    # Full precision sidesteps FP16 numerical issues; quantization disabled.
    dtype=torch.float32,
    load_in_4bit=False,
)
def preprocess_function(examples):
    """Tokenize a batch of translation pairs for training.

    Builds an instruction-style prompt from each English sentence in
    ``examples["input"]`` and tokenizes the Tamil target from
    ``examples["output"]`` as labels. Both sides are padded/truncated to 128
    tokens (matching the model's ``max_seq_length``).

    Pad-token positions in the labels are replaced with -100 so the loss
    function ignores them — leaving real pad ids in ``labels`` makes the
    model waste capacity learning to emit padding.

    Args:
        examples: a batched dataset slice with "input" and "output" string lists.

    Returns:
        dict with "input_ids", "attention_mask", and masked "labels".
    """
    inputs = tokenizer(
        [f"Translate the following English sentence to colloquial Tamil: {text}" for text in examples["input"]],
        padding="max_length",
        truncation=True,
        max_length=128,
    )
    labels = tokenizer(
        examples["output"], padding="max_length", truncation=True, max_length=128
    )
    pad_id = tokenizer.pad_token_id
    # -100 is the ignore_index convention used by HF loss computation.
    inputs["labels"] = [
        [tok if tok != pad_id else -100 for tok in seq]
        for seq in labels["input_ids"]
    ]
    return inputs
# Tokenize every record in batches, dropping the raw text columns afterwards
# so only model-ready fields remain.
tokenized_datasets = dataset.map(
    preprocess_function,
    batched=True,
    remove_columns=dataset["train"].column_names,
)
# Hold out 20% for evaluation; the fixed seed keeps the split reproducible.
split_datasets = tokenized_datasets["train"].train_test_split(test_size=0.2, seed=42)
train_dataset = split_datasets["train"]
test_dataset = split_datasets["test"]
# Trainer configuration. Bug fix: HF/Unsloth trainers require a
# TrainingArguments instance for `args` — passing a plain dict crashes as
# soon as the trainer accesses an attribute on it (dicts have no
# `.per_device_train_batch_size` etc.).
training_args = UnslothTrainingArguments(
    output_dir="./results",  # required checkpoint directory for save_strategy="epoch"
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    learning_rate=2e-5,
    save_strategy="epoch",
    # NOTE(review): renamed to `eval_strategy` in transformers >= 4.46 —
    # confirm against the installed transformers version.
    evaluation_strategy="epoch",
    fp16=False,  # model is loaded in float32; keep mixed precision off
)
trainer = UnslothTrainer(
    model=model,
    train_dataset=train_dataset,
    eval_dataset=test_dataset,
    tokenizer=tokenizer,
    args=training_args,
)
# Run fine-tuning through Unsloth's optimized training loop.
unsloth_train(trainer)

# Persist weights and tokenizer side by side so they can be reloaded together.
save_dir = "./fine_tuned_model"
trainer.model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)
# Reload the just-saved checkpoint for inference, with the same settings
# used during training.
fine_tuned_model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="./fine_tuned_model",
    max_seq_length=128,
    dtype=torch.float32,
    load_in_4bit=False,
)

# Prefer the GPU when one is present; otherwise fall back to CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
fine_tuned_model.to(device)
# Interactive translation loop: reads English sentences until the user types
# "exit". Fixes over the original: (1) generate() with no max_new_tokens stops
# at the ~20-token default and truncates most translations; (2) causal LMs
# return the prompt + completion, so decoding the full sequence echoed the
# instruction back at the user — only newly generated tokens are decoded now;
# (3) blank input is skipped instead of being "translated".
print("Colloquial Tamil Translator (Type 'exit' to quit)")
instruction = "Translate the following English sentence to colloquial Tamil"
while True:
    input_text = input("Enter an English sentence: ").strip()
    if input_text.lower() == "exit":
        break
    if not input_text:
        continue
    inputs = tokenizer(f"{instruction}: {input_text}", return_tensors="pt").to(device)
    # Generate translation; cap new tokens at the training sequence length.
    translated_tokens = fine_tuned_model.generate(**inputs, max_new_tokens=128)
    # Slice off the prompt tokens so only the model's answer is shown.
    new_tokens = translated_tokens[0][inputs["input_ids"].shape[1]:]
    translated_text = tokenizer.decode(new_tokens, skip_special_tokens=True)
    print("Colloquial Tamil Translation:", translated_text)
# (trailing scraper artifact removed — stray "|" from the web listing gutter)