from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Pretrained checkpoint to load; "gpt2" is the 124M-parameter base model.
model_name = "gpt2"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# Inference only: switch off dropout / training-mode behavior.
model.eval()
| |
|
def generate_recipe(prompt, max_length=150):
    """Generate a recipe continuation for *prompt* with GPT-2.

    Args:
        prompt: Seed text the recipe should start from.
        max_length: Maximum total length (prompt + generated tokens).

    Returns:
        The decoded generated text (including the prompt), with special
        tokens stripped.
    """
    # Tokenize with an explicit attention mask so generate() does not have
    # to infer padding — GPT-2 has no dedicated pad token.
    encoded = tokenizer(prompt, return_tensors="pt")

    # BUG FIX: the original call passed top_k / top_p / temperature without
    # do_sample=True, so pure beam search silently ignored all three
    # sampling knobs. Enable sampling so they take effect, and set
    # pad_token_id to EOS to avoid the "no pad token" runtime warning.
    output = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        max_length=max_length,
        num_beams=5,
        no_repeat_ngram_size=2,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
    )

    # output is (num_return_sequences, seq_len); decode the best sequence.
    return tokenizer.decode(output[0], skip_special_tokens=True)
| |
|
if __name__ == "__main__":
    # Guard the interactive entry point so importing this module does not
    # block on stdin or trigger generation as a side effect.
    user_input = input("Enter a cooking prompt to generate a recipe: ")
    generated_recipe = generate_recipe(user_input)
    print("Generated Recipe:")
    print(generated_recipe)
|