import streamlit as st
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
## Function to get a response from the fine-tuned LLaMA 2 model
def getLLamaresponse(input_text):
    ### LLaMA 2 model
    # Load the fine-tuned model and tokenizer
    model_name = "Jithendra-k/InterACT_mini"
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    # Example prompt
    # prompt = "I want to drink water"

    # Run a text-generation pipeline, wrapping the query in the
    # LLaMA 2 instruction format
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer,
                    max_length=50, do_sample=True, repetition_penalty=1.9)
    result = pipe(f"<s>[INST] {input_text} [/INST]")
    # print(result[0]['generated_text'])
    return result[0]['generated_text']
st.set_page_config(page_title="Generate Keywords from User Queries",
                   page_icon='🤖',
                   layout='centered',
                   initial_sidebar_state='collapsed')

st.header("Generate keywords from User queries 🤖")

input_text = st.text_input("Enter the query")
submit = st.button("Generate")

## Final response
if submit:
    st.write(getLLamaresponse(input_text))
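
# Optional refinement (a minimal sketch, assuming Streamlit >= 1.18): Streamlit
# re-runs this script on every interaction, so getLLamaresponse() reloads the
# model and tokenizer for each query. Caching the pipeline with
# st.cache_resource avoids that, e.g.:
#
#   @st.cache_resource
#   def load_pipe():  # hypothetical helper name
#       model = AutoModelForCausalLM.from_pretrained("Jithendra-k/InterACT_mini")
#       tokenizer = AutoTokenizer.from_pretrained("Jithendra-k/InterACT_mini")
#       return pipeline(task="text-generation", model=model, tokenizer=tokenizer,
#                       max_length=50, do_sample=True, repetition_penalty=1.9)
#
# getLLamaresponse could then call load_pipe() instead of rebuilding the
# pipeline on each request.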