import streamlit as st
from langchain.prompts import PromptTemplate
from langchain.llms import CTransformers


@st.cache_resource
def _load_llm():
    """Load the local Llama-2 GGML model once and reuse it across reruns.

    Streamlit re-executes this script on every user interaction; without
    caching, the multi-GB model file would be reloaded from disk on each
    button click.
    """
    return CTransformers(
        model='models/llama-2-7b-chat.ggmlv3.q8_0.bin',
        model_type='llama',
        # Small completion budget; near-zero temperature keeps output focused.
        config={'max_new_tokens': 256, 'temperature': 0.01},
    )


def get_blog_response(blog_context, blog_length, style):
    """Generate a short blog post with the local Llama-2 model.

    Args:
        blog_context: Topic the blog post should cover.
        blog_length: Target word count (interpolated into the prompt as-is;
            presumably a numeric string from the UI — not validated here).
        style: Audience/job profile the post is written for.

    Returns:
        The raw text completion produced by the model.
    """
    llm = _load_llm()
    template = """write a blog for {style} job profile for a topic {text} within {n_words} words."""
    prompt = PromptTemplate(
        input_variables=['style', 'text', 'n_words'],
        template=template,
    )
    response = llm(prompt.format(style=style, text=blog_context, n_words=blog_length))
    return response


# --- Streamlit UI ------------------------------------------------------------
st.set_page_config(
    page_title="Generate blogs",
    page_icon="X",
    layout='centered',
    initial_sidebar_state='collapsed',
)

st.header("Generate blogs")

input_text = st.text_input("Enter the blog topic")

col1, col2 = st.columns([5, 5])
with col1:
    no_words = st.text_input('No of words')
with col2:
    blog_style = st.selectbox(
        'writing the blog for',
        ('Researchers', 'Data Scientist', 'Common People'),
        index=0,
    )

submit = st.button("Generate")

if submit:
    # Don't invoke the model on a blank topic — the prompt would be meaningless.
    if not input_text.strip():
        st.warning("Please enter a blog topic first.")
    else:
        st.write(get_blog_response(input_text, no_words, blog_style))