from langchain.prompts import PromptTemplate
from langchain.llms import CTransformers
import os
import gradio as gr


def GetLlamaResponse(topic):
    """Generate a short poem about *topic* with a local Llama-2 model.

    Args:
        topic: Subject of the poem (plain string, interpolated into the prompt).

    Returns:
        The raw text completion returned by the model.

    Note:
        Requires the GGML weights file ``llama-2-7b-chat.ggmlv3.q8_0.bin`` to be
        present locally; CTransformers loads it from disk on each call, which is
        slow — consider hoisting the model to module level if latency matters.
    """
    llm = CTransformers(
        model_type="llama",
        model="llama-2-7b-chat.ggmlv3.q8_0.bin",
        config={"max_new_tokens": 256, "temperature": 0.4},
    )

    # The template only references {topic}; declaring extra input_variables
    # ("word_count", "poem_style", "temperature") that never appear in the
    # template makes LangChain's PromptTemplate validation fail, and passing
    # them to .format() was equally invalid — both fixed to match the template.
    template = """
        Generate a poem for hungry natural who wish to eat a delicious {topic}
        within 256 words
        """
    prompt = PromptTemplate(
        input_variables=["topic"],
        template=template,
    )

    response = llm(prompt.format(topic=topic))
    return response


# Gradio UI: a single text input feeding GetLlamaResponse.
# (Labels corrected — the previous "Image URL" / "Dark review detection"
# strings were copy-paste residue from an unrelated app.)
inputs_topic = [
    gr.Textbox(type="text", label="Poem topic"),
]
outputs_poem = [
    gr.Textbox(type="text", label="Generated poem"),
]

interface_poem = gr.Interface(
    fn=GetLlamaResponse,
    inputs=inputs_topic,
    outputs=outputs_poem,
    title="Poem generator",
    cache_examples=False,
)

# Guard the launch so importing this module does not start a web server.
if __name__ == "__main__":
    gr.TabbedInterface(
        [interface_poem], tab_names=["Poem generation"]
    ).queue().launch()