Spaces:
Sleeping
Sleeping
import streamlit as st
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
| ## Function To get response from LLAma 2 model | |
@st.cache_resource
def _load_generation_pipeline():
    """Load the fine-tuned model and tokenizer once and cache them.

    Without caching, Streamlit re-executes the script on every interaction,
    which would re-load the model from the Hub on every button click.
    `st.cache_resource` keeps a single pipeline instance across reruns.
    """
    model_name = "Jithendra-k/InterACT_mini"
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return pipeline(
        task="text-generation",
        model=model,
        tokenizer=tokenizer,
        max_length=50,
        do_sample=True,
        repetition_penalty=1.9,
    )


def getLLamaresponse(input_text):
    """Generate a response from the fine-tuned LLaMA-2 model.

    Parameters
    ----------
    input_text : str
        The user query to send to the model.

    Returns
    -------
    str
        The generated text as returned by the transformers text-generation
        pipeline (includes the instruction-formatted prompt prefix).
    """
    pipe = _load_generation_pipeline()
    # LLaMA-2 chat checkpoints expect the [INST] ... [/INST] template.
    result = pipe(f"<s>[INST] {input_text} [/INST]")
    return result[0]['generated_text']
# --- Streamlit page setup and UI -------------------------------------------
st.set_page_config(
    page_title="Generate Keywords from User Queries",
    page_icon='🤖',
    layout='centered',
    initial_sidebar_state='collapsed',
)

st.header("Generate keywords from User queries 🤖")

input_text = st.text_input("Enter the query")
submit = st.button("Generate")

## Final response
if submit:
    # Guard against sending an empty/whitespace-only prompt to the model.
    if input_text.strip():
        st.write(getLLamaresponse(input_text))
    else:
        st.warning("Please enter a query before generating.")