aarukarthiga committed on
Commit
c73a574
·
1 Parent(s): 3773239

Blog Generation using LLAMA

Browse files
Llama2-Blog-Generation.webm DELETED
Binary file (125 kB)
 
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import streamlit as st
2
  from langchain.prompts import PromptTemplate
3
  from langchain_community.llms import CTransformers
 
4
 
5
  # List of blog styles
6
  BLOG_STYLES = [
@@ -34,41 +35,87 @@ def getLLamaResponse(input_text, no_words, blog_style):
34
  response = llm(prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
35
  return response
36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  # Set up the Streamlit page configuration
38
- st.set_page_config(page_title="Generate Blogs",
39
  page_icon='images/favicon.ico', # Updated favicon source
40
  layout='centered',
41
  initial_sidebar_state='collapsed')
 
42
  # Display image using st.image
43
- st.image('images/ConcertIDC_Logo_Stack.png', width=50,caption='')
 
44
  # Page header with favicon icon
45
  st.markdown(
46
  """
47
  <h1 style="display:flex; align-items:center;">
48
- Generate Blogs
49
  </h1>
50
  """,
51
  unsafe_allow_html=True
52
  )
53
 
 
 
 
 
 
 
 
54
  # Input field for the blog topic
55
- input_text = st.text_input("Enter the Blog Topic")
56
 
57
- # Create two columns for additional input fields
58
- col1, col2 = st.columns([5, 5])
59
 
60
- with col1:
61
- # Input field for the number of words
62
- no_words = st.text_input('No of Words')
 
 
 
 
 
 
 
 
63
 
64
- with col2:
 
 
 
 
 
 
65
  # Dropdown selection for the blog style
66
  blog_style = st.selectbox('Writing the blog for', BLOG_STYLES, index=0)
67
 
68
- # Button to generate the blog
69
- submit = st.button("Generate")
70
 
71
- # Display the generated blog response
72
- if submit:
73
- response = getLLamaResponse(input_text, no_words, blog_style)
74
- st.write(response)
 
 
 
 
 
1
  import streamlit as st
2
  from langchain.prompts import PromptTemplate
3
  from langchain_community.llms import CTransformers
4
+ import time
5
 
6
  # List of blog styles
7
  BLOG_STYLES = [
 
35
  response = llm(prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
36
  return response
37
 
38
+ # Function to generate topics from LLama 2 model
39
+ def generate_topics_from_llama(input_text):
40
+ # Initialize the LLama 2 model
41
+ llm = CTransformers(model='models/llama-2-7b-chat.ggmlv3.q8_0.bin',
42
+ model_type='llama',
43
+ config={'max_new_tokens': 256, 'temperature': 0.01})
44
+
45
+ # Define the prompt template for generating topics
46
+ topic_template = """
47
+ Generate a list of blog topics based on the keywords: {input_text}
48
+ """
49
+ prompt = PromptTemplate(input_variables=["input_text"], template=topic_template)
50
+
51
+ # Generate the topics from the LLama 2 model
52
+ topics_response = llm(prompt.format(input_text=input_text))
53
+ # Split the response into a list of topics
54
+ topics = topics_response.split('\n')
55
+ return [topic.strip() for topic in topics if topic.strip()]
56
+
57
  # Set up the Streamlit page configuration
58
+ st.set_page_config(page_title="LLAMA 2 Generate Blogs",
59
  page_icon='images/favicon.ico', # Updated favicon source
60
  layout='centered',
61
  initial_sidebar_state='collapsed')
62
+
63
  # Display image using st.image
64
+ st.image('images/ConcertIDC_Logo_Stack.png', width=50, caption='')
65
+
66
  # Page header with favicon icon
67
  st.markdown(
68
  """
69
  <h1 style="display:flex; align-items:center;">
70
+ LLAMA 2 Generate Blogs
71
  </h1>
72
  """,
73
  unsafe_allow_html=True
74
  )
75
 
76
+ # Placeholder for topics and selected topic
77
+ if 'topics' not in st.session_state:
78
+ st.session_state.topics = []
79
+
80
+ if 'selected_topic' not in st.session_state:
81
+ st.session_state.selected_topic = None
82
+
83
  # Input field for the blog topic
84
+ input_text = st.text_input("Enter the Blog Topic Keywords")
85
 
86
+ # Button to generate topics
87
+ generate_topics = st.button("Generate Topics")
88
 
89
+ # Generate and display topics
90
+ if generate_topics:
91
+ with st.spinner('Generating topics...'):
92
+ st.session_state.topics = generate_topics_from_llama(input_text)
93
+ time.sleep(2) # Simulate processing time
94
+
95
+ # Display generated topics in bullet format
96
+ if st.session_state.topics:
97
+ # st.markdown("### Suggested Topics")
98
+ # for topic in st.session_state.topics:
99
+ # st.markdown(f"- {topic}")
100
 
101
+ # Selection for one of the topics
102
+ selected_topic = st.selectbox('Select a Topic', st.session_state.topics)
103
+ st.session_state.selected_topic = selected_topic
104
+
105
+ # Optional input field for the number of words
106
+ no_words = st.text_input('Number of Words (optional)', value='')
107
+
108
  # Dropdown selection for the blog style
109
  blog_style = st.selectbox('Writing the blog for', BLOG_STYLES, index=0)
110
 
111
+ # Button to generate the blog content
112
+ generate_blog = st.button("Generate Blog Content")
113
 
114
+ # Display the generated blog response
115
+ if generate_blog:
116
+ with st.spinner('Generating blog content...'):
117
+ if no_words == '':
118
+ no_words = '500' # Default to 500 words if not provided
119
+ response = getLLamaResponse(st.session_state.selected_topic, no_words, blog_style)
120
+ time.sleep(2) # Simulate processing time
121
+ st.write(response)
app1.py DELETED
@@ -1,121 +0,0 @@
1
- import streamlit as st
2
- from langchain.prompts import PromptTemplate
3
- from langchain_community.llms import CTransformers
4
- import time
5
-
6
- # List of blog styles
7
- BLOG_STYLES = [
8
- 'Researchers',
9
- 'Data Scientist',
10
- 'Common People',
11
- 'Software Engineers',
12
- 'Product Managers',
13
- 'Healthcare Professionals',
14
- 'Teachers',
15
- 'Entrepreneurs',
16
- 'Marketers',
17
- 'Students'
18
- ]
19
-
20
- # Function to get response from LLama 2 model
21
- def getLLamaResponse(input_text, no_words, blog_style):
22
- # Initialize the LLama 2 model
23
- llm = CTransformers(model='models/llama-2-7b-chat.ggmlv3.q8_0.bin',
24
- model_type='llama',
25
- config={'max_new_tokens': 256, 'temperature': 0.01})
26
-
27
- # Define the prompt template
28
- template = """
29
- Write a blog for {blog_style} job profile for a topic {input_text}
30
- within {no_words} words.
31
- """
32
- prompt = PromptTemplate(input_variables=["blog_style", "input_text", 'no_words'], template=template)
33
-
34
- # Generate the response from the LLama 2 model
35
- response = llm(prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
36
- return response
37
-
38
- # Function to generate topics from LLama 2 model
39
- def generate_topics_from_llama(input_text):
40
- # Initialize the LLama 2 model
41
- llm = CTransformers(model='models/llama-2-7b-chat.ggmlv3.q8_0.bin',
42
- model_type='llama',
43
- config={'max_new_tokens': 256, 'temperature': 0.01})
44
-
45
- # Define the prompt template for generating topics
46
- topic_template = """
47
- Generate a list of blog topics based on the keywords: {input_text}
48
- """
49
- prompt = PromptTemplate(input_variables=["input_text"], template=topic_template)
50
-
51
- # Generate the topics from the LLama 2 model
52
- topics_response = llm(prompt.format(input_text=input_text))
53
- # Split the response into a list of topics
54
- topics = topics_response.split('\n')
55
- return [topic.strip() for topic in topics if topic.strip()]
56
-
57
- # Set up the Streamlit page configuration
58
- st.set_page_config(page_title="LLAMA 2 Generate Blogs",
59
- page_icon='images/favicon.ico', # Updated favicon source
60
- layout='centered',
61
- initial_sidebar_state='collapsed')
62
-
63
- # Display image using st.image
64
- st.image('images/ConcertIDC_Logo_Stack.png', width=50, caption='')
65
-
66
- # Page header with favicon icon
67
- st.markdown(
68
- """
69
- <h1 style="display:flex; align-items:center;">
70
- LLAMA 2 Generate Blogs
71
- </h1>
72
- """,
73
- unsafe_allow_html=True
74
- )
75
-
76
- # Placeholder for topics and selected topic
77
- if 'topics' not in st.session_state:
78
- st.session_state.topics = []
79
-
80
- if 'selected_topic' not in st.session_state:
81
- st.session_state.selected_topic = None
82
-
83
- # Input field for the blog topic
84
- input_text = st.text_input("Enter the Blog Topic Keywords")
85
-
86
- # Button to generate topics
87
- generate_topics = st.button("Generate Topics")
88
-
89
- # Generate and display topics
90
- if generate_topics:
91
- with st.spinner('Generating topics...'):
92
- st.session_state.topics = generate_topics_from_llama(input_text)
93
- time.sleep(2) # Simulate processing time
94
-
95
- # Display generated topics in bullet format
96
- if st.session_state.topics:
97
- # st.markdown("### Suggested Topics")
98
- # for topic in st.session_state.topics:
99
- # st.markdown(f"- {topic}")
100
-
101
- # Selection for one of the topics
102
- selected_topic = st.selectbox('Select a Topic', st.session_state.topics)
103
- st.session_state.selected_topic = selected_topic
104
-
105
- # Optional input field for the number of words
106
- no_words = st.text_input('Number of Words (optional)', value='')
107
-
108
- # Dropdown selection for the blog style
109
- blog_style = st.selectbox('Writing the blog for', BLOG_STYLES, index=0)
110
-
111
- # Button to generate the blog content
112
- generate_blog = st.button("Generate Blog Content")
113
-
114
- # Display the generated blog response
115
- if generate_blog:
116
- with st.spinner('Generating blog content...'):
117
- if no_words == '':
118
- no_words = '500' # Default to 500 words if not provided
119
- response = getLLamaResponse(st.session_state.selected_topic, no_words, blog_style)
120
- time.sleep(2) # Simulate processing time
121
- st.write(response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
images/ConcertIDC_Logo_Stack.png ADDED
images/favicon.ico ADDED
Llama-Blog-Generation.webm → models/llama-2-7b-chat.ggmlv3.q8_0.bin RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a11956f96c17b694b3fd0e4f3073ee48722debb2ec68754aba55de9b82a90708
3
- size 4094994
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bfdde943555c78294626a6ccd40184162d066d39774bd2c98dae24943d32cc3
3
+ size 7160799872