Update app.py
Browse files
app.py
CHANGED
|
"""Gradio demo exposing three Hugging Face pipelines in separate tabs.

Tabs:
- Sentiment: classify a short review with the default sentiment pipeline.
- Translation: English -> French via Helsinki-NLP/opus-mt-en-fr.
- Image -> Text: caption an uploaded image with ydshieh/vit-gpt2-coco-en.

Reconstructed from a mangled diff paste: the superseded linear-script
version (which contained a stray top-level ``return`` and a dangling
``gr.`` fragment) is dropped in favour of the final ``gr.Blocks`` app.
"""
from transformers import pipeline
import gradio as gr

# Load each model pipeline once at startup — loading is slow and must not
# happen per request.
sentiment = pipeline('sentiment-analysis')
en_to_fr = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
img_to_text = pipeline(model="ydshieh/vit-gpt2-coco-en")


def _classify(text):
    """Return a {label: confidence} mapping for *text*.

    Fix: ``gr.Label`` expects a dict of class names to confidences (or a
    plain string), not the raw pipeline dict ``{'label': ..., 'score': ...}``
    that the previous lambda passed through.
    """
    result = sentiment(text)[0]  # e.g. {'label': 'POSITIVE', 'score': 0.99}
    return {result['label']: result['score']}


def _translate(text):
    """Return the French translation of *text*."""
    return en_to_fr(text)[0]['translation_text']


def _caption(image):
    """Return a generated caption for *image* (PIL image from gr.Image)."""
    return img_to_text(image)[0]['generated_text']


with gr.Blocks() as demo:
    with gr.Tab("Sentiment"):
        txt = gr.Textbox(label="Type your review", lines=2,
                         placeholder="e.g. good place with great food")
        out1 = gr.Label()
        txt.submit(_classify, inputs=txt, outputs=out1)

    with gr.Tab("Translation"):
        txt2 = gr.Textbox(label="Type text to translate", lines=2)
        out2 = gr.Textbox()
        txt2.submit(_translate, inputs=txt2, outputs=out2)

    with gr.Tab("Image → Text"):
        # type="pil" hands the handler a PIL image, which the
        # image-to-text pipeline accepts directly.
        img = gr.Image(type="pil")
        out3 = gr.Textbox()
        img.change(_caption, img, out3)

demo.launch()