# (Scrape artifact: "Spaces: Sleeping" status text captured from the Hugging Face
#  Spaces hosting page — not part of the program.)
# -*- coding: utf-8 -*-
"""[Student]_Module_2_Session_3[1].ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1XTqd7vFIlHX6bgIBkZf2UUTw9ekrOO0T

Installations
"""

"""# Let's build a demo for a sentiment analysis task!

Import the necessary modules:
"""
| from transformers import pipeline | |
| import gradio as gr | |
| """Import the pipeline :""" | |
| sentiment = pipeline("sentiment-analysis") | |
| """Test the pipeline on these reviews (you can also test on your own reviews) :""" | |
| #"I really enjoyed my stay !" | |
| #"Worst rental I ever got" | |
| sentiment(["I really enjoyed my stay !","Worst rental I ever got"]) | |
| """What is the format of the output ? How can you get only the sentiment or the confidence score ?""" | |
| sentiment(["I really enjoyed my stay !","Worst rental I ever got"])[0]["label"] | |
| sentiment(["I really enjoyed my stay !","Worst rental I ever got"])[0]["score"] | |
| """Create a function that takes a text in input, and returns a sentiment, and a confidence score as 2 different variables""" | |
| def get_sentiment(text): | |
| lbl = sentiment(text)[0]["label"] | |
| scr = sentiment(text)[0]["score"] | |
| return lbl,scr | |
| summarizer = pipeline("summarization") | |
| def get_sum(text): | |
| return summarizer(text)[0]["summary_text"] | |
| from gtts import gTTS | |
| def convert(text): | |
| tts = gTTS(text) | |
| path = "audio.mp3" | |
| tts.save(path) | |
| return path | |
| #to get access to the model on hugging face | |
| from huggingface_hub import InferenceClient | |
| #You are creating a connection to a model hosted on Hugging Face using their InferenceClient. | |
| client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2") | |
| # Parameters: | |
| # - message: the new message the user just sent (like "What's the weather?") | |
| # - history: a list of all the previous messages in the chat | |
| def respond(message, history): | |
| #Why use this structure? | |
| #Because the API expects the messages in an ordered format, where each message has a role (system, user, assistant) and content. | |
| #We need to keep the conversation order, so a list is used (order matters). | |
| #Each message has structured data (role and content), so a dictionary is used. | |
| #The list contains elements, and each element here is a dictionary. | |
| # This tells the model how to behave (like giving it a "job description"). | |
| messages = [{"role": "system", "content": "You are a helpful formal assistant."}] | |
| # This loop recreates the entire chat history in a format the model understands. | |
| # It adds each user question and the assistant's response in order. | |
| for user, bot in history: | |
| messages.append({"role": "user", "content": user}) | |
| messages.append({"role": "assistant", "content": bot}) | |
| # Add the new message to the end of the conversation | |
| messages.append({"role": "user", "content": message}) | |
| # Send the conversation to the Mistral model via Hugging Face | |
| # max_tokens=300 limits how long the reply can be | |
| try: | |
| response = client.chat_completion(messages, max_tokens=400,stop=["User:", "Q:"]) | |
| print(response) | |
| except Exception as e: | |
| print("ERROR:", e) | |
| # Extract the first reply from the model's response | |
| bot_reply = response.choices[0].message.content | |
| return bot_reply | |
| convo_tab = gr.ChatInterface(fn = respond,title = "Chatbot", description = "feel free to ask anything!") | |
| conv_tab = gr.Interface(fn = convert, inputs = gr.Textbox(label= "Enter your text here: "), outputs = gr.Audio(label = "Here's the audio: "), title= "Text-to-Speech") | |
| """Build an interface for the app using Gradio. | |
| The customer wants this result : | |
|  | |
| """ | |
| sen_tab = gr.Interface(fn = get_sentiment, inputs = gr.Textbox(label="Enter your review: ",placeholder= "(eg..I love this product)"),outputs = [gr.Textbox(label="Sentiment:"),gr.Number(label="Score:")], title = "Sentiment Analysis" ) | |
| sum_tab = gr.Interface(fn = get_sum, inputs = gr.Textbox(label= "Enter your text to summarize: "), outputs = gr.Textbox(label="Summarization: "), title = "Summarization") | |
| demo = gr.TabbedInterface( | |
| [sen_tab, sum_tab, conv_tab, convo_tab], | |
| ["Sentiment-Analysis","Summarization", "Text-To-Speech","Chatbot"], | |
| theme = "soft" | |
| ) | |
| demo.launch() |