Spaces:
No application file
No application file
| """ | |
| Data Scientist.: Dr.Eddy Giusepe Chirinos Isidro | |
| Run script: | |
| $ streamlit run main_st.py | |
| """ | |
| import sys | |
| import tiktoken | |
| import streamlit as st | |
| import openai | |
| import os | |
| from dotenv import load_dotenv, find_dotenv | |
| _ = load_dotenv(find_dotenv()) # read local .env file | |
| openai.api_key = os.environ['OPENAI_API_KEY'] | |
| # Definimos a função para contar tokens: | |
| def count_tokens(text): | |
| encoding = tiktoken.encoding_for_model("gpt-3.5-turbo") | |
| return len(encoding.encode(text)) | |
| # Defina o limite máximo de tokens: | |
| MAX_MEMORY_TOKENS = 100 | |
| # Configurar o estado da sessão (session state) para o histórico de conversas ---> AQUI MUDA! porque estamos usando STREAMLIT 🤗 | |
| if "conversation_history" not in st.session_state: | |
| st.session_state.conversation_history = [ | |
| {"role": "system", "content": "Você é um assistente prestativo."} | |
| ] | |
def chatbot_response(chat_input: str, placeholder_response) -> None:
    """Stream a chat completion for *chat_input* into a Streamlit placeholder.

    Appends the user message to the session conversation history, trims the
    oldest non-system messages while the history exceeds MAX_MEMORY_TOKENS,
    streams the model reply incrementally into *placeholder_response*, and
    finally records the assistant reply in the history.

    Args:
        chat_input: The raw text the user typed.
        placeholder_response: A Streamlit ``st.empty()`` placeholder that the
            partial reply is rendered into via ``markdown``.
    """
    history = st.session_state.conversation_history
    history.append({"role": "user", "content": chat_input})

    # Enforce the token budget: pop the oldest user/assistant message
    # (index 1 — index 0 is the system prompt, which must survive) until we
    # are under the limit, but always keep at least the system prompt plus
    # the latest user message.
    total_tokens = sum(count_tokens(message["content"]) for message in history)
    while total_tokens > MAX_MEMORY_TOKENS and len(history) > 2:
        removed_message = history.pop(1)
        total_tokens -= count_tokens(removed_message["content"])

    # Stream the completion so the reply renders as it is generated.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=history,
        stream=True,
    )

    assistant_response = ""
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        # BUG FIX: the original `continue`d on any chunk whose delta carried a
        # "role" key, which would also discard that chunk's "content" when
        # both keys were present. Simply read "content" whenever it exists.
        content = delta.get("content")
        if content:
            assistant_response += content
            placeholder_response.markdown(assistant_response, unsafe_allow_html=True)

    # Persist the full assistant reply so it is part of future context.
    history.append({"role": "assistant", "content": assistant_response})
# --- Streamlit app layout ---
st.title("🤗 Chatbot 🤗")
st.sidebar.header("Data Scientist.: Dr.Eddy Giusepe Chirinos Isidro")
st.sidebar.write("[GitHub: EddyGiusepe](https://github.com/EddyGiusepe)")
st.sidebar.write("[e-mail: eddychirinos.unac@gmail.com]()")

user_input = st.text_input("Você:")
response_button = st.button("Enviar")
placeholder_response = st.empty()

# Only call the API when the user actually typed something: the original
# fired a request with an empty user message whenever "Enviar" was clicked
# with a blank input box.
if response_button and user_input.strip():
    chatbot_response(user_input, placeholder_response)