# Spaces: (Hugging Face Space page-capture residue)
# Runtime error — the Space failed at startup; script reconstructed below.
import gradio as gr
from datasets import load_dataset
from transformers import T5Tokenizer, T5ForConditionalGeneration, Trainer, TrainingArguments

# --- Data ----------------------------------------------------------------
# Upload dataset.json to the Space root. The same file is used for both
# splits, so "eval" here measures fit on the training data, not held-out
# performance — this mirrors the original behavior.
DATASET_PATH = "dataset.json"
hf_dataset = load_dataset("json", data_files={"train": DATASET_PATH, "test": DATASET_PATH})

# --- Model ---------------------------------------------------------------
MODEL_NAME = "t5-small"
tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME)
model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME)
def preprocess_function(examples):
    """Tokenize a batch of {"Problema", "Risposta"} examples for seq2seq training.

    Each problem is prefixed with "Problema: " to give the T5 model a task
    prompt. Inputs are tokenized to a fixed 512 tokens and targets to a fixed
    128 tokens (truncated/padded). Target ids are attached as ``labels``.

    Args:
        examples: A batched ``datasets`` mapping with string-list columns
            "Problema" and "Risposta".

    Returns:
        The tokenizer output dict with an added ``labels`` key.
    """
    inputs = ["Problema: " + prob for prob in examples["Problema"]]
    targets = list(examples["Risposta"])
    model_inputs = tokenizer(inputs, max_length=512, truncation=True, padding="max_length")
    labels = tokenizer(targets, max_length=128, truncation=True, padding="max_length")
    # Fix: with padding="max_length" the label sequences are mostly pad
    # tokens; left as-is the Trainer computes loss on the padding. Replace
    # pad ids with -100 so cross-entropy ignores those positions.
    pad_id = tokenizer.pad_token_id
    model_inputs["labels"] = [
        [tok if tok != pad_id else -100 for tok in seq]
        for seq in labels["input_ids"]
    ]
    return model_inputs
# Apply the preprocessing to every split in batched mode.
tokenized_datasets = hf_dataset.map(preprocess_function, batched=True)

# Hyper-parameters for one quick fine-tuning epoch.
# NOTE(review): newer transformers releases renamed `evaluation_strategy`
# to `eval_strategy`; if the Space pins a recent version this kwarg raises
# at startup — confirm against the pinned transformers version.
_TRAIN_CONFIG = dict(
    output_dir="./results",
    evaluation_strategy="epoch",
    learning_rate=5e-5,
    per_device_train_batch_size=4,
    num_train_epochs=1,
    weight_decay=0.01,
    save_total_limit=1,
    logging_dir="./logs",
)
training_args = TrainingArguments(**_TRAIN_CONFIG)
# Fine-tune at import time: the Space blocks on this before serving the UI.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
)
trainer.train()
def generate_response(problem):
    """Generate a response ("Risposta") for a free-text problem statement.

    Prefixes the input with "Problema: " to match the training format, then
    decodes with beam search (4 beams, early stopping, up to 128 tokens).

    Args:
        problem: Problem text entered by the user.

    Returns:
        The generated response string with special tokens stripped.
    """
    inputs = tokenizer("Problema: " + problem, return_tensors="pt", max_length=512, truncation=True)
    # Fix: pass the attention mask explicitly; calling generate() with only
    # input_ids makes the model attend to padding and emits a warning.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=128,
        num_beams=4,
        early_stopping=True,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Minimal text-in / text-out web UI wrapping the inference function.
demo = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Problema-to-Risposta Generator",
    description="Enter a problem, and the fine-tuned model will generate a response.",
)
demo.launch()