import streamlit as st
from langchain.prompts import PromptTemplate
from langchain_community.llms import CTransformers
import time
# Target audiences selectable in the "Writing the blog for" dropdown;
# the chosen value is interpolated into the LLM prompt as {blog_style}.
BLOG_STYLES: list[str] = [
    'Researchers',
    'Data Scientist',
    'Common People',
    'Software Engineers',
    'Product Managers',
    'Healthcare Professionals',
    'Teachers',
    'Entrepreneurs',
    'Marketers',
    'Students'
]
def getLLamaResponse(input_text, no_words, blog_style):
    """Produce a blog post for *input_text* using the local LLama 2 model.

    Parameters
    ----------
    input_text : str
        Topic the blog should cover.
    no_words : str | int
        Requested word budget, interpolated into the prompt as-is.
    blog_style : str
        Audience/job profile the writing should target.

    Returns
    -------
    str
        Raw completion text returned by the model.
    """
    # Load the quantized GGML chat model; near-zero temperature keeps the
    # output effectively deterministic.
    model = CTransformers(
        model='models/llama-2-7b-chat.ggmlv3.q8_0.bin',
        model_type='llama',
        config={'max_new_tokens': 256, 'temperature': 0.01},
    )

    template = """
    Write a blog for {blog_style} job profile for a topic {input_text}
    within {no_words} words.
    """
    blog_prompt = PromptTemplate(
        template=template,
        input_variables=["blog_style", "input_text", "no_words"],
    )
    rendered = blog_prompt.format(
        blog_style=blog_style,
        input_text=input_text,
        no_words=no_words,
    )
    return model(rendered)
def generate_topics_from_llama(input_text):
    """Ask the local LLama 2 model for blog-topic ideas.

    Parameters
    ----------
    input_text : str
        Keywords to base the topic suggestions on.

    Returns
    -------
    list[str]
        Whitespace-stripped, non-empty lines of the model's response,
        one suggested topic per entry.
    """
    # Same quantized model and decoding settings as blog generation.
    model = CTransformers(
        model='models/llama-2-7b-chat.ggmlv3.q8_0.bin',
        model_type='llama',
        config={'max_new_tokens': 256, 'temperature': 0.01},
    )

    topic_template = """
    Generate a list of blog topics based on the keywords: {input_text}
    """
    topic_prompt = PromptTemplate(
        template=topic_template,
        input_variables=["input_text"],
    )
    raw_output = model(topic_prompt.format(input_text=input_text))

    # The model emits one topic per line; discard blanks and edge whitespace.
    stripped = (line.strip() for line in raw_output.split('\n'))
    return [line for line in stripped if line]
# Configure the page first — Streamlit requires set_page_config to be the
# first Streamlit call in the script.
st.set_page_config(page_title="LLAMA 2 Generate Blogs",
                   page_icon='images/favicon.ico',  # browser-tab favicon
                   layout='centered',
                   initial_sidebar_state='collapsed')
# Company logo rendered above the page header.
st.image('images/ConcertIDC_Logo_Stack.png', width=50, caption='')
# Page header with favicon icon
st.markdown(
"""
<h1 style="display:flex; align-items:center;">
LLAMA 2 Generate Blogs
</h1>
""",
unsafe_allow_html=True
)
# Seed session state so topics and the chosen topic survive Streamlit reruns.
st.session_state.setdefault('topics', [])
st.session_state.setdefault('selected_topic', None)

# Keyword input that drives topic generation.
input_text = st.text_input("Enter the Blog Topic Keywords")

# Clicking this triggers a topic-generation rerun.
generate_topics = st.button("Generate Topics")
# Generate topics on demand and let the user pick one.
if generate_topics:
    if not input_text.strip():
        # Guard: calling the model with empty keywords wastes a slow local
        # inference and produces meaningless suggestions.
        st.warning("Please enter some keywords first.")
    else:
        with st.spinner('Generating topics...'):
            # No artificial delay — model inference is the real wait.
            st.session_state.topics = generate_topics_from_llama(input_text)

# Show the selector whenever topics exist (persists across reruns).
if st.session_state.topics:
    selected_topic = st.selectbox('Select a Topic', st.session_state.topics)
    st.session_state.selected_topic = selected_topic
# Optional target length; a blank value falls back to a default when the
# blog is generated.
no_words = st.text_input('Number of Words (optional)', value='')
# Audience the blog should be written for (see BLOG_STYLES).
blog_style = st.selectbox('Writing the blog for', BLOG_STYLES, index=0)
# Clicking this triggers blog generation below.
generate_blog = st.button("Generate Blog Content")
# Generate and display the blog content.
if generate_blog:
    # Bug fix: previously a click before any topic was generated/selected
    # passed None straight into the prompt. Prefer the selected topic and
    # fall back to the raw keywords; refuse to run with neither.
    topic = st.session_state.selected_topic or input_text.strip()
    if not topic:
        st.warning("Please enter keywords or select a topic first.")
    else:
        with st.spinner('Generating blog content...'):
            # Default to 500 words when the optional field is blank or
            # whitespace-only (the old `== ''` check missed whitespace).
            word_count = no_words.strip() or '500'
            response = getLLamaResponse(topic, word_count, blog_style)
        st.write(response)
|