Muh113 committed on
Commit
579e3a4
·
verified ·
1 Parent(s): 4ea832f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -27
app.py CHANGED
@@ -1,36 +1,46 @@
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import streamlit as st


# Cache the tokenizer/model: Streamlit re-executes this whole script on every
# widget interaction, and without caching the ~3 GB GPT-2 Large checkpoint
# would be reloaded from disk on each rerun.
@st.cache_resource
def _load_model():
    """Load and cache the GPT-2 Large tokenizer and model."""
    tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
    model = AutoModelForCausalLM.from_pretrained('gpt2-large')
    return tokenizer, model


tokenizer, model = _load_model()


def generate_blog_post(topic):
    """Generate a blog post about *topic* using GPT-2 Large.

    Parameters
    ----------
    topic : str
        Subject of the post; interpolated into the prompt.

    Returns
    -------
    str
        Decoded generated text (includes the prompt prefix, since the
        model continues from it).
    """
    prompt = f"Write a blog post about {topic}."
    inputs = tokenizer.encode(prompt, return_tensors='pt')

    # Generate text. max_new_tokens budgets only the generated tokens
    # (max_length would count the prompt against the 500-token budget).
    # pad_token_id is set explicitly because GPT-2 defines no pad token.
    outputs = model.generate(
        inputs,
        max_new_tokens=500,
        num_return_sequences=1,
        do_sample=True,
        top_p=0.95,
        top_k=60,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode the generated text
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return text


# Streamlit interface
st.title("Blog Post Generator")
st.write("Generate a blog post for a given topic using GPT-2 Large.")

topic = st.text_input("Enter the topic for the blog post:")

if st.button("Generate Blog Post"):
    if topic:
        with st.spinner("Generating..."):
            try:
                blog_post = generate_blog_post(topic)
                st.subheader("Generated Blog Post")
                st.write(blog_post)
            except Exception as e:
                st.error(f"Error generating blog post: {e}")
    else:
        st.warning("Please enter a topic.")
 
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM


# st.cache(allow_output_mutation=True) is deprecated; st.cache_resource is the
# supported cache for unhashable, expensive resources such as models, and it
# prevents reloading GPT-2 Large on every Streamlit rerun.
@st.cache_resource
def load_model():
    """Load and cache the GPT-2 Large tokenizer and model."""
    tokenizer = AutoTokenizer.from_pretrained("gpt2-large")
    model = AutoModelForCausalLM.from_pretrained("gpt2-large")
    return tokenizer, model


tokenizer, model = load_model()

st.title("Blog Post Generator")
st.write("Generate a blog post for a given topic using GPT-2 Large.")

# User input for the blog post topic
topic = st.text_input("Enter the topic for your blog post:")

# Generate blog post button
if st.button("Generate Blog Post"):
    if topic:
        # Refine the input prompt to guide the model towards generating a blog post
        input_text = (
            f"Write a detailed blog post about {topic}. The post should cover "
            "various aspects of the topic and provide valuable information to "
            "the readers. Start with an introduction and follow with detailed "
            "paragraphs."
        )

        # Encode the input text
        inputs = tokenizer.encode(input_text, return_tensors="pt")

        with st.spinner("Generating..."):
            try:
                # Generate the blog post using GPT-2 Large.
                # do_sample=True is required for temperature/top_p to take
                # effect — without it generate() decodes greedily and silently
                # ignores both. early_stopping was dropped: it only applies to
                # beam search. max_new_tokens budgets generated tokens only,
                # instead of prompt + generation as max_length does, and
                # pad_token_id is set because GPT-2 defines no pad token.
                outputs = model.generate(
                    inputs,
                    max_new_tokens=500,
                    num_return_sequences=1,
                    no_repeat_ngram_size=2,
                    do_sample=True,
                    temperature=0.7,
                    top_p=0.9,
                    pad_token_id=tokenizer.eos_token_id,
                )

                # Decode the generated text
                blog_post = tokenizer.decode(outputs[0], skip_special_tokens=True)

                st.write("### Generated Blog Post:")
                st.write(blog_post)
            except Exception as e:
                st.error(f"Error generating blog post: {e}")
    else:
        st.write("Please enter a topic to generate a blog post.")