import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Load the fine-tuned model and tokenizer
my_model = GPT2LMHeadModel.from_pretrained("jeevana/EmailSubjectLineGeneration")
my_tokenizer = GPT2Tokenizer.from_pretrained("jeevana/EmailSubjectLineGeneration")


def generate_response(model, tokenizer, prompt):
    # Tokenize the email body, truncating so the prompt plus up to 15 new tokens
    # stays within GPT-2's 1024-token context window
    input_ids = tokenizer.encode(prompt, return_tensors="pt", truncation=True, max_length=1000)

    # Create the attention mask and pad token id
    attention_mask = torch.ones_like(input_ids)
    pad_token_id = tokenizer.eos_token_id

    output = model.generate(
        input_ids,
        max_new_tokens=15,
        min_new_tokens=1,
        num_return_sequences=1,
        attention_mask=attention_mask,
        pad_token_id=pad_token_id,
    )

    response = tokenizer.decode(output[0], skip_special_tokens=True)
    print("Generated response::", response)
    print("len(prompt)::", len(prompt))

    # Strip the echoed prompt plus 9 extra characters (presumably a separator
    # used in the fine-tuning prompt format) so only the subject line is returned
    response = response[len(prompt) + 9:]
    return response


def predict(email_text):
    prediction = generate_response(my_model, my_tokenizer, email_text)
    print("type of response:", type(prediction))
    return prediction


app = gr.Interface(
    fn=predict,
    inputs=[gr.Textbox(label="Email", lines=12)],
    outputs=[gr.Textbox(label="Subject", lines=3)],
    title="EmailSubjectLineGeneration",
    description="EmailSubjectLineGeneration",
)

app.launch(share=True, debug=True)
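# A minimal smoke test, assuming the model downloads successfully; the sample
# email below is an invented placeholder, not from the original project. Run it
# in place of (or before) app.launch() to check generation outside the UI.
sample_email = (
    "Hi team, the quarterly budget review has been moved to Thursday at 3 PM. "
    "Please update your calendars and send me your slides by Wednesday."
)
print("Predicted subject:", predict(sample_email))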