Hugging Face Space (status: Sleeping) — conversational chatbot demo.
from transformers import pipeline, Conversation
import gradio as gr
import base64
from dotenv import load_dotenv
import os

# Load environment variables from the .env file when running locally
# (provides the TOKEN variable read below).
load_dotenv()

# Read the logo and base64-encode it so it can be inlined as a data URI
# in the Markdown header of the Gradio UI.
with open("Iso_Logotipo_Ceibal.png", "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode()

# Conversational pipeline; TOKEN is the Hugging Face Hub access token
# (raises KeyError at startup if the env var is missing).
bot = pipeline("conversational",model="microsoft/DialoGPT-large",token =os.environ['TOKEN'])
# "tiiuae/falcon-7b" — this one ran out of memory
# "facebook/blenderbot-400M-distill" — this one is a real conversation, boring
# "meta-llama/Llama-2-7b-chat-hf" — this one ran out of RAM
def add_new_message(message, chat_history):
    """Rebuild a transformers ``Conversation`` from the Gradio chat history.

    Each ``(user, bot)`` tuple in ``chat_history`` is replayed as a processed
    user turn followed by the recorded model response, then ``message`` is
    appended as the new, unanswered user input.

    Parameters
    ----------
    message : str
        The new user message to answer.
    chat_history : list[tuple[str, str]]
        Prior turns as ``(user_text, bot_text)`` pairs (Gradio Chatbot format).

    Returns
    -------
    Conversation
        Conversation object ready to be passed to the pipeline.
    """
    conversation = Conversation()
    # NOTE: locals renamed from `user`/`bot` — the original `bot` shadowed the
    # module-level pipeline of the same name, which invites subtle bugs.
    for user_text, bot_text in chat_history:
        conversation.add_user_input(user_text)
        conversation.mark_processed()
        # Append the recorded model response for this past turn.
        conversation.append_response(bot_text)
    conversation.add_user_input(message)
    return conversation
def respond(message, chat_history):
    """Generate a reply for ``message`` and append the turn to the history.

    Parameters
    ----------
    message : str
        The user's new message.
    chat_history : list[tuple[str, str]]
        Mutable Gradio chat history; the new ``(message, response)`` pair is
        appended in place.

    Returns
    -------
    tuple[str, list]
        ``("", chat_history)`` — the empty string clears the input textbox.
    """
    prompt = add_new_message(message, chat_history)
    # The pipeline returns the Conversation back; the newest model reply is
    # the last element of `generated_responses`.
    response = bot(prompt).generated_responses[-1]
    chat_history.append((message, response))
    return "", chat_history
# Gradio UI: header with inlined logo, chat window, input textbox and buttons.
with gr.Blocks() as demo:
    # Fixed mojibake in the user-facing Spanish text (podrás/conversación/inglés)
    # and corrected the MIME type: the logo file is a PNG, not a JPG.
    gr.Markdown("""
    <center>
    <h1>
    Uso de AI para un chatbot.
    </h1>
    <img src='data:image/png;base64,{}' width=200px>
    <h3>
    Con este espacio podrás hablar en formato conversación con DialoGPT-large en inglés.
    </h3>
    </center>
    """.format(encoded_image))
    with gr.Row():
        chatbot = gr.Chatbot()  # just to fit the notebook
    with gr.Row():
        with gr.Row():
            with gr.Column(scale=4):
                msg = gr.Textbox(label="Texto de entrada")
            with gr.Column(scale=1):
                btn = gr.Button("Enviar")
                clear = gr.ClearButton(components=[msg, chatbot], value="Borrar chat")
    # Both the button and pressing Enter in the textbox trigger `respond`.
    btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])

demo.launch()