Vasudevakrishna committed on
Commit
6ec3cf4
·
verified ·
1 Parent(s): 00ce2e9

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -0
app.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import gradio as gr
from transformers import pipeline, logging, AutoModelForCausalLM, AutoTokenizer

# Base checkpoint to serve; downloaded from the Hugging Face hub on first run.
model_name = "microsoft/phi-2"

# NOTE(review): trust_remote_code executes Python shipped with the checkpoint;
# fine for a known model like phi-2, but worth keeping in mind if model_name
# ever becomes configurable.
model = AutoModelForCausalLM.from_pretrained(
model_name,
trust_remote_code=True
)
# NOTE(review): use_cache=False disables the KV cache, which slows generation;
# this flag is usually only needed during fine-tuning — confirm intent.
model.config.use_cache = False

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Reuse EOS as the pad token — presumably this tokenizer ships without one.
tokenizer.pad_token = tokenizer.eos_token

# Optional: load a fine-tuned PEFT adapter from a local checkpoint folder.
# peft_model_folder = 'ckpts'
# model.load_adapter(peft_model_folder)
18
def generate_text(input_text):
    """Generate a model completion for *input_text*.

    Args:
        input_text: The user's prompt string from the Gradio textbox.

    Returns:
        The generated continuation with the leading prompt echo removed.
    """
    # Build the text-generation pipeline once and reuse it on later calls;
    # the original rebuilt it on every request, repeating expensive setup.
    pipe = getattr(generate_text, "_pipe", None)
    if pipe is None:
        pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
        generate_text._pipe = pipe

    result = pipe(input_text)
    generated = result[0]['generated_text']
    # Strip only the leading prompt. The original used str.replace, which
    # also deletes any later occurrence of the prompt inside the completion.
    if generated.startswith(input_text):
        generated = generated[len(input_text):]
    return generated
23
+
24
# Wire the generation function into a simple one-in/one-out web UI.
prompt_box = gr.Textbox(
    label="Ask question?",
    info="Enter your prompt:",
)
response_box = gr.Textbox(
    label="Response from AI Model: ",
)

iface = gr.Interface(
    fn=generate_text,  # called with the textbox contents on submit
    inputs=prompt_box,
    outputs=response_box,
)

# Start the Gradio server (blocks until the app is stopped).
iface.launch()