# LLAMA 2 blog-generator Streamlit app (local GGML model via CTransformers).
import time

import streamlit as st
from langchain.prompts import PromptTemplate
from langchain_community.llms import CTransformers
# Audiences a post can be tailored to; rendered in the "Writing the blog for"
# dropdown further down the page.
BLOG_STYLES = [
    'Researchers',
    'Data Scientist',
    'Common People',
    'Software Engineers',
    'Product Managers',
    'Healthcare Professionals',
    'Teachers',
    'Entrepreneurs',
    'Marketers',
    'Students',
]
# Process-wide cached model handle. The original code constructed a new
# CTransformers instance (re-reading the multi-GB GGML weights from disk) on
# every button click; loading once and reusing it is the fix.
_LLM = None


def _get_llm():
    """Return the shared LLama 2 model, constructing it on first use."""
    global _LLM
    if _LLM is None:
        _LLM = CTransformers(
            model='models/llama-2-7b-chat.ggmlv3.q8_0.bin',
            model_type='llama',
            config={'max_new_tokens': 256, 'temperature': 0.01},
        )
    return _LLM


def getLLamaResponse(input_text, no_words, blog_style):
    """Generate a blog post with the LLama 2 model.

    Args:
        input_text: Topic of the blog post.
        no_words: Target word count (string or int; interpolated into the prompt).
        blog_style: Target audience, e.g. an entry of BLOG_STYLES.

    Returns:
        The raw text produced by the model.
    """
    template = """
    Write a blog for {blog_style} job profile for a topic {input_text}
    within {no_words} words.
    """
    prompt = PromptTemplate(
        input_variables=["blog_style", "input_text", "no_words"],
        template=template,
    )
    return _get_llm()(
        prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words)
    )


def generate_topics_from_llama(input_text):
    """Ask the LLama 2 model for blog-topic suggestions.

    Args:
        input_text: Keywords to base the topic suggestions on.

    Returns:
        A list of non-empty, stripped lines from the model's output, one
        suggested topic per element.
    """
    topic_template = """
    Generate a list of blog topics based on the keywords: {input_text}
    """
    prompt = PromptTemplate(input_variables=["input_text"], template=topic_template)
    response = _get_llm()(prompt.format(input_text=input_text))
    # One topic per output line; drop blank lines and surrounding whitespace.
    return [line.strip() for line in response.split('\n') if line.strip()]
# ---------------------------------------------------------------------------
# Streamlit UI
# ---------------------------------------------------------------------------
st.set_page_config(
    page_title="LLAMA 2 Generate Blogs",
    page_icon='images/favicon.ico',
    layout='centered',
    initial_sidebar_state='collapsed',
)

# Page header (HTML so the h1 can be flex-styled).
st.markdown(
    """
    <h1 style="display:flex; align-items:center;">
    LLAMA 2 Generate Blogs
    </h1>
    """,
    unsafe_allow_html=True,
)

# Session state survives Streamlit reruns: keep the generated topic list and
# the user's current pick so they persist across button clicks.
if 'topics' not in st.session_state:
    st.session_state.topics = []
if 'selected_topic' not in st.session_state:
    st.session_state.selected_topic = None

input_text = st.text_input("Enter the Blog Topic Keywords")

if st.button("Generate Topics"):
    # Guard: don't prompt the model with empty keywords.
    if input_text.strip():
        with st.spinner('Generating topics...'):
            st.session_state.topics = generate_topics_from_llama(input_text)
            time.sleep(2)  # simulate processing time
    else:
        st.warning("Please enter some keywords first.")

# Topic selection and blog generation only make sense once topics exist.
if st.session_state.topics:
    st.session_state.selected_topic = st.selectbox(
        'Select a Topic', st.session_state.topics
    )

    no_words = st.text_input('Number of Words (optional)', value='')
    blog_style = st.selectbox('Writing the blog for', BLOG_STYLES, index=0)

    if st.button("Generate Blog Content"):
        # Guard: the original formatted None into the prompt when no topic
        # had been selected yet.
        if st.session_state.selected_topic:
            with st.spinner('Generating blog content...'):
                if no_words == '':
                    no_words = '500'  # default to 500 words if not provided
                response = getLLamaResponse(
                    st.session_state.selected_topic, no_words, blog_style
                )
                time.sleep(2)  # simulate processing time
            st.write(response)
        else:
            st.warning("Please select a topic first.")