| import streamlit as st |
| from transformers import GPT2Tokenizer, GPT2LMHeadModel |
|
|
| |
model_name = 'gpt2-large'


@st.cache_resource
def _load_gpt2(name: str):
    """Load the GPT-2 tokenizer and model once per server process.

    Without caching, Streamlit re-executes the whole script on every
    widget interaction, re-instantiating the ~3 GB model each time.
    ``st.cache_resource`` keeps a single shared copy alive across reruns.
    """
    tok = GPT2Tokenizer.from_pretrained(name)
    mdl = GPT2LMHeadModel.from_pretrained(name)
    mdl.eval()  # inference only: disable dropout etc.
    return tok, mdl


tokenizer, model = _load_gpt2(model_name)
|
|
| |
# Page header and the single input widget for this app.
st.title("GPT-2 Blog Post Generator")

# `text` is the user's topic/prompt; the generation block below runs
# only when this is non-empty (Streamlit reruns the script on each edit).
text = st.text_area("Enter your Topic: ")
|
|
if text:
    try:
        # Tokenize the prompt; PyTorch tensors are what generate() expects.
        encoded_input = tokenizer(text, return_tensors='pt')

        # do_sample=True is required for top_p/top_k to take effect:
        # without it, generate() decodes greedily and silently ignores
        # both sampling parameters. attention_mask and pad_token_id are
        # passed explicitly — GPT-2 has no pad token, and omitting them
        # triggers runtime warnings and can skew generation.
        output = model.generate(
            input_ids=encoded_input['input_ids'],
            attention_mask=encoded_input['attention_mask'],
            max_length=100,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            top_p=0.95,
            top_k=50,
            pad_token_id=tokenizer.eos_token_id,
        )

        # Decode the first (only) returned sequence back to plain text.
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)

        st.subheader("Generated Blog Post")
        st.write(generated_text)
    except Exception as e:
        # Top-level UI boundary: surface any failure to the user rather
        # than letting Streamlit show a raw traceback.
        st.error(f"An error occurred: {e}")
| |
|
|