import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the GPT-2 Large model and tokenizer once and cache them,
# so they are not reloaded on every Streamlit rerun.
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("gpt2-large")
    model = AutoModelForCausalLM.from_pretrained("gpt2-large")
    return tokenizer, model

tokenizer, model = load_model()
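# Note (assumption about deployment, not part of the original app): the first
# call to load_model() downloads the gpt2-large weights (roughly 3 GB), so the
# initial startup can take a while; subsequent reruns reuse the objects cached
# by st.cache_resource.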
st.title("Blog Post Generator")
st.write("Generate a blog post for a given topic using GPT-2 Large.")

# User input for the blog post topic
topic = st.text_input("Enter the topic for your blog post:")
# Generate blog post button
if st.button("Generate Blog Post"):
    if topic:
        # Refine the input prompt to steer the model toward a blog-post format
        input_text = (
            f"Write a detailed blog post about {topic}. The post should cover "
            "various aspects of the topic and provide valuable information to "
            "the readers. Start with an introduction and follow with detailed "
            "paragraphs."
        )

        # Encode the input text
        inputs = tokenizer.encode(input_text, return_tensors="pt")

        # Generate the blog post with GPT-2 Large. do_sample=True is required
        # for temperature/top_p to take effect; without it, decoding is greedy
        # and those arguments are ignored. early_stopping is dropped because it
        # only applies to beam search. GPT-2 has no pad token, so the EOS token
        # is used for padding.
        outputs = model.generate(
            inputs,
            max_length=500,  # includes the prompt tokens
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            pad_token_id=tokenizer.eos_token_id,
        )
        # Decode the generated text
        blog_post = tokenizer.decode(outputs[0], skip_special_tokens=True)

        st.write("### Generated Blog Post:")
        st.write(blog_post)
    else:
        st.write("Please enter a topic to generate a blog post.")