"""Generate a cooking recipe continuation from a user prompt using pre-trained GPT-2."""

from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Model variant; alternatives include "gpt2-medium" or "gpt2-large".
MODEL_NAME = "gpt2"

# Load the pre-trained model and tokenizer once at module import.
model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)

# Inference only: disables dropout and other training-time behavior.
model.eval()


def generate_recipe(prompt, max_length=150):
    """Generate recipe-style text continuing *prompt*.

    Args:
        prompt: Seed text to condition generation on.
        max_length: Maximum total sequence length in tokens
            (prompt tokens + generated tokens).

    Returns:
        The decoded generated text (includes the prompt itself).
    """
    # Tokenize the prompt; the tokenizer call also returns an attention mask.
    encoded = tokenizer(prompt, return_tensors="pt")

    # BUG FIX: the original passed top_k/top_p/temperature WITHOUT
    # do_sample=True, so those sampling parameters were silently ignored
    # and pure beam search ran instead. Enable sampling so they take effect.
    # Also pass attention_mask and pad_token_id explicitly: GPT-2 defines
    # no pad token, and omitting these triggers warnings from transformers
    # and can degrade generation quality.
    output = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        max_length=max_length,
        do_sample=True,
        num_beams=5,
        no_repeat_ngram_size=2,
        top_k=50,
        top_p=0.95,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode the best (first) returned sequence back into a string.
    return tokenizer.decode(output[0], skip_special_tokens=True)


if __name__ == "__main__":
    # Guarded so importing this module does not block on stdin.
    user_input = input("Enter a cooking prompt to generate a recipe: ")
    generated_recipe = generate_recipe(user_input)
    print("Generated Recipe:")
    print(generated_recipe)