Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -15,8 +15,8 @@ tokenizer.pad_token = tokenizer.eos_token
|
|
| 15 |
peft_model_folder = './ckpts'
|
| 16 |
model.load_adapter(peft_model_folder)
|
| 17 |
|
| 18 |
-
def generate_text(input_text):
|
| 19 |
-
pipe = pipeline(task="text-generation",model=model,tokenizer=tokenizer, max_length=
|
| 20 |
result = pipe(f"{input_text}")
|
| 21 |
return_answer = (result[0]['generated_text']).replace(input_text,'')
|
| 22 |
return return_answer
|
|
@@ -24,12 +24,13 @@ def generate_text(input_text):
|
|
| 24 |
# Create a Gradio interface
|
| 25 |
iface = gr.Interface(
|
| 26 |
fn=generate_text, # Function to be called on user input
|
| 27 |
-
inputs=gr.Textbox(
|
| 28 |
label="Ask question?",
|
| 29 |
info="Enter your prompt:"
|
| 30 |
),
|
|
|
|
| 31 |
outputs=gr.Textbox(
|
| 32 |
-
label="Response from
|
| 33 |
),
|
| 34 |
)
|
| 35 |
|
|
|
|
# Directory holding the fine-tuned PEFT (LoRA) adapter checkpoints.
peft_model_folder = './ckpts'

# Attach the adapter weights to the base model loaded earlier in the file.
# NOTE(review): assumes `model` was created above with adapter support — confirm.
model.load_adapter(peft_model_folder)
| 18 |
+
def generate_text(input_text, max_length):
|
| 19 |
+
pipe = pipeline(task="text-generation",model=model,tokenizer=tokenizer, max_length=max_length)
|
| 20 |
result = pipe(f"{input_text}")
|
| 21 |
return_answer = (result[0]['generated_text']).replace(input_text,'')
|
| 22 |
return return_answer
|
|
|
|
# Build the Gradio UI: a prompt textbox and a max-length slider feed
# generate_text; its reply lands in a single output textbox.
prompt_box = gr.Textbox(label="Ask question?", info="Enter your prompt:")
length_slider = gr.Slider(1, 100, value=10, step=1, label="Max Length")
answer_box = gr.Textbox(label="Response from Phi2 Model: ")

iface = gr.Interface(
    fn=generate_text,  # called with (prompt, max_length) on submit
    inputs=[prompt_box, length_slider],
    outputs=answer_box,
)