jgs-430 committed on
Commit
1d1af7e
·
1 Parent(s): 7378546

Add Gradio app to run model inference

Browse files
Files changed (2) hide show
  1. app.py +16 -3
  2. requirements.txt +3 -0
app.py CHANGED
@@ -1,7 +1,20 @@
1
  import gradio as gr
 
2
 
def greet(name):
    """Return a friendly greeting for *name*.

    The result is "Hello " followed by the given name and "!!".
    """
    greeting = "Hello " + name
    return greeting + "!!"
 
 
 
 
 
 
 
 
 
 
 
 
 
# Wire the greeting function into a minimal text-in / text-out Gradio UI.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")

# Start the local Gradio server (blocks until shut down).
demo.launch()
 
1
  import gradio as gr
2
+ from transformers import pipeline
3
 
4
+ # Load your model from Hugging Face Hub
5
+ pipe = pipeline("text-generation", model="AgileGenAI/JIRA-story-point-increment-predictor")
6
+
7
+ # Define inference function
8
+ def predict(prompt):
9
+ result = pipe(prompt, max_new_tokens=100)
10
+ return result[0]["generated_text"]
11
+
12
+ # Create a simple Gradio interface
13
+ demo = gr.Interface(
14
+ fn=predict,
15
+ inputs=gr.Textbox(label="Enter prompt"),
16
+ outputs=gr.Textbox(label="Predicted output"),
17
+ title="JIRA Story Point Increment Predictor"
18
+ )
19
 
 
20
  demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ transformers
2
+ torch
3
+ gradio