import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel, Trainer, TrainingArguments, DataCollatorForLanguageModeling
from peft import LoraConfig, get_peft_model
from datasets import Dataset
import gradio as gr  # Import Gradio for UI

# Define a dataset of 20 incorrect math memes with corrections and explanations.
data = [
    {"text": "Incorrect: 8 ÷ 2(2+2) = 1? Correct: 8 ÷ 2(2+2) = 16. Explanation: Evaluate parentheses first, then perform division and multiplication from left to right."},
    {"text": "Incorrect: 5 + 5 = 20? Correct: 5 + 5 = 10. Explanation: Simple addition error."},
    {"text": "Incorrect: 6 * 6 = 36 but 6 / 6 = 6? Correct: 6 / 6 = 1. Explanation: A number divided by itself equals 1."},
    {"text": "Incorrect: 2^3 = 6? Correct: 2^3 = 8. Explanation: 2 cubed is 8."},
    {"text": "Incorrect: √16 = 5? Correct: √16 = 4. Explanation: The square root of 16 is 4."},
    {"text": "Incorrect: 9 - 3 = 3? Correct: 9 - 3 = 6. Explanation: Correct subtraction yields 6."},
    {"text": "Incorrect: 4 * 4 = 8? Correct: 4 * 4 = 16. Explanation: Multiplication error."},
    {"text": "Incorrect: 10 / 2 = 10? Correct: 10 / 2 = 5. Explanation: Division error."},
    {"text": "Incorrect: 15% of 200 = 50? Correct: 15% of 200 = 30. Explanation: 15% of 200 equals 30."},
    {"text": "Incorrect: 100 / 4 = 20? Correct: 100 / 4 = 25. Explanation: Division error."},
    {"text": "Incorrect: 3 + 7 = 11? Correct: 3 + 7 = 10. Explanation: 3 plus 7 equals 10."},
    {"text": "Incorrect: 2 * 3 + 4 = 14? Correct: 2 * 3 + 4 = 10. Explanation: Follow order of operations: multiply, then add."},
    {"text": "Incorrect: 12 / 3 * 2 = 10? Correct: 12 / 3 * 2 = 8. Explanation: 12 divided by 3 is 4; 4 times 2 is 8."},
    {"text": "Incorrect: 7 * 7 = 42? Correct: 7 * 7 = 49. Explanation: Multiplication error."},
    {"text": "Incorrect: 14 - 7 = 8? Correct: 14 - 7 = 7. Explanation: Subtraction error."},
    {"text": "Incorrect: (3 + 2) * 2 = 12? Correct: (3 + 2) * 2 = 10. Explanation: Add first, then multiply."},
    {"text": "Incorrect: 50% of 100 = 60? Correct: 50% of 100 = 50. Explanation: 50% is half of 100."},
    {"text": "Incorrect: 9 + 9 = 18 then 18 / 2 = 10? Correct: 18 / 2 = 9. Explanation: Division error."},
    {"text": "Incorrect: 5! = 100? Correct: 5! = 120. Explanation: 5 factorial is 120."},
    {"text": "Incorrect: 3^2 + 4^2 = 14? Correct: 3^2 + 4^2 = 25. Explanation: 9 + 16 equals 25."}
]

# Convert the list to a Hugging Face Dataset.
dataset = Dataset.from_list(data)
print("Dataset created with", len(dataset), "examples.")
# Load the GPT-2 tokenizer and model.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# GPT-2 does not have an official pad token; reuse the eos_token.
tokenizer.pad_token = tokenizer.eos_token
model = GPT2LMHeadModel.from_pretrained("gpt2")

# Configure LoRA for efficient fine-tuning.
lora_config = LoraConfig(
    task_type="CAUSAL_LM",  # For causal language modeling.
    r=8,
    lora_alpha=32,
    lora_dropout=0.1
)

# Wrap the model with LoRA adapters.
model = get_peft_model(model, lora_config)
print("Model loaded and LoRA configured.")
# Tokenize each example.
def tokenize_function(example):
    return tokenizer(example["text"], truncation=True, max_length=128, padding="max_length")

tokenized_dataset = dataset.map(tokenize_function, batched=False)
tokenized_dataset.set_format(type="torch", columns=["input_ids", "attention_mask"])
print("Dataset tokenized.")
training_args = TrainingArguments(
    output_dir="output",
    per_device_train_batch_size=1,
    num_train_epochs=5,      # Increase epochs to help the model learn from 20 examples.
    logging_steps=1,
    save_strategy="epoch",
    learning_rate=3e-5,      # Slightly lower learning rate.
    weight_decay=0.01,
    report_to="none"
)

data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=data_collator
)

print("Starting training...")
trainer.train()
print("Training complete!")
# Gradio UI for testing the model.
def correct_math(prompt):
    model.eval()  # Set model to evaluation mode.
    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
    input_ids = inputs.input_ids.to(model.device)
    attention_mask = inputs.attention_mask.to(model.device)
    outputs = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_new_tokens=50,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.90,
        pad_token_id=tokenizer.eos_token_id
    )
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return result
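# Optional smoke test (an added example, not in the original script): run one
# training-style prompt through the model before exposing it in the UI.
print(correct_math("Incorrect: 2 + 2 = 5?"))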
# Create the Gradio interface.
gr.Interface(
    fn=correct_math,
    inputs="text",
    outputs="text",
    title="Math Correction Model",
    description="Enter an incorrect math statement to get the correct answer and explanation."
).launch()