Jithendra-k committed on
Commit
baf1e2b
·
verified ·
1 Parent(s): d8a6b9c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -7,18 +7,18 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
7
def getLLamaresponse(input_text):
    """Return a fixed placeholder reply.

    Model inference is disabled in this version; the *input_text*
    argument is accepted for interface compatibility but ignored.
    """
    return "Hello World!"
22
 
23
  st.set_page_config(page_title="Generate Keywords from User Queries",
24
  page_icon='🤖',
 
7
def getLLamaresponse(input_text):
    """Generate a response for *input_text* with the fine-tuned LLama2 model.

    Parameters
    ----------
    input_text : str
        The user query; it is wrapped in the LLama2 instruction template
        ``<s>[INST] ... [/INST]`` before generation.

    Returns
    -------
    str
        The pipeline's generated text (includes the prompt, as produced by
        the ``text-generation`` pipeline's ``generated_text`` field).

    Notes
    -----
    The model, tokenizer, and pipeline are loaded lazily on the first call
    and cached on the function object. Streamlit re-runs the script on every
    interaction, so reloading the model per call would re-download and
    re-instantiate it each time — the cache makes that a one-time cost per
    process.
    """
    if not hasattr(getLLamaresponse, "_pipe"):
        # One-time setup: load the fine-tuned model and tokenizer from the hub.
        model_name = "Jithendra-k/interACT_LLM"
        model = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        getLLamaresponse._pipe = pipeline(
            task="text-generation",
            model=model,
            tokenizer=tokenizer,
            max_length=50,
            do_sample=True,
        )

    result = getLLamaresponse._pipe(f"<s>[INST] {input_text} [/INST]")
    return result[0]['generated_text']
22
 
23
  st.set_page_config(page_title="Generate Keywords from User Queries",
24
  page_icon='🤖',