Spaces:
Sleeping
Sleeping
File size: 64,468 Bytes
# -*- coding: utf-8 -*-
"""[Student]_Module_2_Session_3[1].ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1XTqd7vFIlHX6bgIBkZf2UUTw9ekrOO0T
Installations
"""
"""#Let's build a demo for a sentiment analysis task !
Import the necessary modules :
"""
from transformers import pipeline
import gradio as gr
"""Import the pipeline :"""
sentiment = pipeline("sentiment-analysis")
"""Test the pipeline on these reviews (you can also test on your own reviews) :"""
#"I really enjoyed my stay !"
#"Worst rental I ever got"
sentiment(["I really enjoyed my stay !","Worst rental I ever got"])
"""What is the format of the output ? How can you get only the sentiment or the confidence score ?"""
sentiment(["I really enjoyed my stay !","Worst rental I ever got"])[0]["label"]
sentiment(["I really enjoyed my stay !","Worst rental I ever got"])[0]["score"]
"""Create a function that takes a text in input, and returns a sentiment, and a confidence score as 2 different variables"""
def get_sentiment(text):
lbl = sentiment(text)[0]["label"]
scr = sentiment(text)[0]["score"]
return lbl,scr
# Summarization pipeline; default task model is downloaded on first use.
summarizer = pipeline("summarization")
def get_sum(text):
    """Summarize *text* and return the generated summary string."""
    # The summarizer returns a list with one dict per input;
    # "summary_text" holds the generated summary.
    output = summarizer(text)
    return output[0]["summary_text"]
from gtts import gTTS
def convert(text):
    """Synthesize *text* to speech with gTTS and return the saved mp3 path."""
    audio_path = "audio.mp3"
    # gTTS sends the text to Google's TTS service and writes an mp3 file.
    gTTS(text).save(audio_path)
    return audio_path
#to get access to the model on hugging face
from huggingface_hub import InferenceClient
#You are creating a connection to a model hosted on Hugging Face using their InferenceClient.
# NOTE(review): no token is passed, so this relies on anonymous/ambient
# credentials — confirm the Space provides an HF token if the model is gated.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
# Parameters:
# - message: the new message the user just sent (like "What's the weather?")
# - history: a list of all the previous messages in the chat
def respond(message, history):
    """Generate the assistant's next reply for the Gradio ChatInterface.

    Parameters:
        message: the new user message (a string).
        history: list of (user, assistant) pairs from the earlier turns.

    Returns:
        The model's reply as a string, or a readable error message if the
        API call failed.
    """
    # The chat API expects an ordered list of {"role", "content"} dicts.
    # The system message tells the model how to behave (its "job description").
    messages = [{"role": "system", "content": "You are a helpful formal assistant."}]
    # Replay the whole conversation, in order, so the model has full context:
    # each past turn contributes one user message and one assistant message.
    for user, bot in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": bot})
    # Add the new message to the end of the conversation.
    messages.append({"role": "user", "content": message})
    # max_tokens caps the reply length; the stop sequences keep the model
    # from inventing further dialogue turns ("User:", "Q:").
    try:
        response = client.chat_completion(messages, max_tokens=400, stop=["User:", "Q:"])
    except Exception as e:
        # BUG FIX: the original only printed the error here and then fell
        # through to `response.choices[...]`, crashing with a NameError on
        # the unbound `response`. Return the error to the chat UI instead.
        print("ERROR:", e)
        return f"Sorry, something went wrong: {e}"
    # Extract the first reply from the model's response.
    return response.choices[0].message.content
# Chatbot tab: ChatInterface tracks the (user, bot) history and passes it
# to respond() on every turn.
convo_tab = gr.ChatInterface(fn = respond,title = "Chatbot", description = "feel free to ask anything!")
# Text-to-speech tab: free text in, playable mp3 out (via convert()).
# NOTE(review): `conv_tab` (TTS) vs `convo_tab` (chatbot) are one letter
# apart and easy to mix up — consider renaming (e.g. tts_tab / chat_tab).
conv_tab = gr.Interface(fn = convert, inputs = gr.Textbox(label= "Enter your text here: "), outputs = gr.Audio(label = "Here's the audio: "), title= "Text-to-Speech")
"""Build an interface for the app using Gradio.
The customer wants this result :

"""
# Sentiment tab: get_sentiment returns (label, score); Gradio maps the
# tuple positionally onto the two output components.
sen_tab = gr.Interface(fn = get_sentiment, inputs = gr.Textbox(label="Enter your review: ",placeholder= "(eg..I love this product)"),outputs = [gr.Textbox(label="Sentiment:"),gr.Number(label="Score:")], title = "Sentiment Analysis" )
# Summarization tab: text in, summary text out (via get_sum()).
sum_tab = gr.Interface(fn = get_sum, inputs = gr.Textbox(label= "Enter your text to summarize: "), outputs = gr.Textbox(label="Summarization: "), title = "Summarization")
# Combine the four apps into one tabbed UI; the tab titles align
# positionally with the interface list.
demo = gr.TabbedInterface(
[sen_tab, sum_tab, conv_tab, convo_tab],
["Sentiment-Analysis","Summarization", "Text-To-Speech","Chatbot"],
theme = "soft"
)
demo.launch()