Muh113 committed (verified)
Commit 439ce91 · Parent(s): 579e3a4

Update app.py

Files changed (1)
  1. app.py +26 -37
app.py CHANGED
@@ -1,46 +1,35 @@
+from transformers import AutoModelForCausalLM, AutoTokenizer
 import streamlit as st
-import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# Load GPT-2 large model and tokenizer
-@st.cache(allow_output_mutation=True)
-def load_model():
-    tokenizer = AutoTokenizer.from_pretrained("gpt2-large")
-    model = AutoModelForCausalLM.from_pretrained("gpt2-large")
-    return tokenizer, model
+# Load the tokenizer and model
+tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
+model = AutoModelForCausalLM.from_pretrained('gpt2-large')
 
-tokenizer, model = load_model()
+def generate_blog_post(topic):
+    prompt = f"Write a blog post about {topic}."
+    inputs = tokenizer.encode(prompt, return_tensors='pt')
+
+    # Generate text
+    outputs = model.generate(inputs, max_length=500, num_return_sequences=1, do_sample=True, top_p=0.95, top_k=60)
+
+    # Decode the generated text
+    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return text
 
+# Streamlit interface
 st.title("Blog Post Generator")
 st.write("Generate a blog post for a given topic using GPT-2 Large.")
 
-# User input for the blog post topic
-topic = st.text_input("Enter the topic for your blog post:")
+topic = st.text_input("Enter the topic for the blog post:")
 
-# Generate blog post button
 if st.button("Generate Blog Post"):
     if topic:
-        # Refine the input prompt to guide the model towards generating a blog post
-        input_text = f"Write a detailed blog post about {topic}. The post should cover various aspects of the topic and provide valuable information to the readers. Start with an introduction and follow with detailed paragraphs."
-
-        # Encode the input text
-        inputs = tokenizer.encode(input_text, return_tensors="pt")
-
-        # Generate the blog post using GPT-2 large
-        outputs = model.generate(
-            inputs,
-            max_length=500,
-            num_return_sequences=1,
-            no_repeat_ngram_size=2,
-            early_stopping=True,
-            temperature=0.7,
-            top_p=0.9
-        )
-
-        # Decode the generated text
-        blog_post = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-        st.write("### Generated Blog Post:")
-        st.write(blog_post)
+        with st.spinner("Generating..."):
+            try:
+                blog_post = generate_blog_post(topic)
+                st.subheader("Generated Blog Post")
+                st.write(blog_post)
+            except Exception as e:
+                st.error(f"Error generating blog post: {e}")
     else:
-        st.write("Please enter a topic to generate a blog post.")
+        st.warning("Please enter a topic.")
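
A side note on the new version, not part of this commit: Streamlit re-executes the whole script on every interaction, and this revision drops the old @st.cache wrapper, so the module-level from_pretrained calls reload the gpt2-large weights on every rerun. Below is a minimal sketch of a cached loader, assuming a recent Streamlit (1.18+, where st.cache_resource replaces the deprecated @st.cache for heavy objects); the attention-mask and pad_token_id handling is likewise an optional tweak to silence the usual GPT-2 generate() warnings, not the committed code.

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource  # load once per server process instead of on every rerun
def load_model():
    tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
    model = AutoModelForCausalLM.from_pretrained('gpt2-large')
    return tokenizer, model

tokenizer, model = load_model()

def generate_blog_post(topic):
    prompt = f"Write a blog post about {topic}."
    # Tokenizing via __call__ also returns an attention mask; GPT-2 has no
    # pad token, so reusing EOS as pad_token_id avoids the generate() warning.
    enc = tokenizer(prompt, return_tensors='pt')
    outputs = model.generate(
        enc['input_ids'],
        attention_mask=enc['attention_mask'],
        max_length=500,
        do_sample=True,
        top_p=0.95,
        top_k=60,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

With the cached loader, the roughly 3 GB of gpt2-large float32 weights load once when the app starts (streamlit run app.py) rather than on every button click; the rest of the app can stay exactly as committed.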