jjeampierjs committed on
Commit
efbe585
·
verified ·
1 Parent(s): 8343e14

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +124 -34
src/streamlit_app.py CHANGED
@@ -1,40 +1,130 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
4
  import streamlit as st
 
 
 
 
 
 
 
5
 
6
- """
7
- # Welcome to Streamlit!
 
 
 
 
 
8
 
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
 
12
 
13
- In the meantime, below is an example of what you can do with just a few lines of code:
 
 
 
 
 
 
 
 
 
14
  """
15
 
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
-
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
-
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
-
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
32
-
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ from langchain.prompts import PromptTemplate
3
+ from langchain.chains import RetrievalQA
4
+ from langchain_community.embeddings import OpenAIEmbeddings
5
+ from langchain.chat_models import ChatOpenAI
6
+ from langchain_community.vectorstores import Pinecone as LangchainPinecone
7
+ from PIL import Image
8
+ import os
9
 
10
# --- Secrets / external-service configuration ---
# Keys are read from the environment; never hard-code them.
openai_api_key = os.environ.get("OPENAI_API_KEY")
pinecone_key = os.environ.get("PINECONE_API_KEY")
pinecone_env = "us-east-1"
index_name = "knowledge-base-eliminatorias"

# --- Streamlit page setup ---
st.set_page_config(page_title="Chatbot usando ChatGPT", page_icon="⚽")

# --- Welcome message used as the assistant's first chat entry ---
# (User-facing text is intentionally in Spanish.)
msg_chatbot = """
Soy un chatbot que te ayudará a conocer sobre las eliminatorias sudamericanas:
### Puedo ayudarte con las siguientes preguntas:
- ¿Quién es el líder en la tabla de posiciones?
- ¿Cuáles son los próximos partidos de Perú?
- Bríndame la tabla de posiciones
- Y muchas más...
"""
34
 
35
# Reset the conversation *before* any messages are rendered this run,
# so the cleared history never flashes on screen.
if st.session_state.get("clear_chat", False):
    st.session_state["clear_chat"] = False
    st.session_state["messages"] = [{"role": "assistant", "content": msg_chatbot}]
    st.rerun()
42
+
43
# --- Sidebar: title, logo, purpose/sources, and a clear-chat button ---
with st.sidebar:
    st.title("Chatbot usando OpenAI (ChatGPT)")
    st.image(Image.open('src/conmebol.jpg'), caption='Conmebol')
    st.markdown("""
### Propósito
Este chatbot utiliza una base de conocimiento (Pinecone) con información del sitio web de Marca.
Usa Langchain con ChatGPT de OpenAI.
### Fuentes
- Marca - (https://www.marca.com)
""")
    # The button only raises a flag; the actual reset happens at the top of
    # the script on the next rerun, before messages are drawn.
    if st.button("🧹 Limpiar chat"):
        st.session_state["clear_chat"] = True
        st.rerun()
61
+
62
# Seed the chat history with the welcome message on first load only.
if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "assistant", "content": msg_chatbot}]
67
+
68
# Replay the stored conversation so it persists across Streamlit reruns.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.write(msg["content"])
74
+
75
# ====================
# Answer generation
# ====================
@st.cache_resource(show_spinner=False)
def _build_qa_chain():
    """Build the RetrievalQA chain once and reuse it across reruns.

    The LLM client, the embeddings client and the Pinecone vector-store
    connection are expensive to construct; the original code rebuilt all of
    them on every single question. Caching the assembled chain with
    ``st.cache_resource`` makes each question after the first much faster.
    """
    llm = ChatOpenAI(
        openai_api_key=openai_api_key,
        model_name="gpt-3.5-turbo",
        temperature=0.85,
    )

    # Prompt (in Spanish, matching the app's audience): answer from the
    # retrieved context, or admit not knowing; answers include emoticons.
    # NOTE: this text is consumed by the model at runtime — do not translate.
    template = """Responde a la pregunta basada en el siguiente contexto.
Si no puedes responder, di: "No lo sé, disculpa, puedes buscar en internet."
Contexto:
{context}
Pregunta: {question}
Respuesta usando también emoticones:
"""

    prompt = PromptTemplate(
        input_variables=["context", "question"],
        template=template,
    )

    embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)

    # Connect to the pre-populated Pinecone index (built outside this app).
    vectorstore = LangchainPinecone.from_existing_index(
        index_name=index_name,
        embedding=embeddings,
    )

    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type='stuff',
        retriever=vectorstore.as_retriever(),
        verbose=True,
        chain_type_kwargs={"prompt": prompt},
    )


def generate_openai_pinecone_response(prompt_input):
    """Answer *prompt_input* with retrieval-augmented generation.

    Parameters
    ----------
    prompt_input : str
        The user's question.

    Returns
    -------
    str
        The model's answer, grounded in the Pinecone knowledge base.
    """
    return _build_qa_chain().run(prompt_input)
114
+
115
# ====================
# Main chat interface
# ====================
prompt = st.chat_input("Ingresa tu pregunta")
if prompt:
    # Record and immediately echo the user's question.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a reply whenever the last stored message is from the user.
# Answering the *stored* message (not the local `prompt`) fixes a bug where,
# if a previous run was interrupted after the user message was appended,
# this branch would run with `prompt` still None and query the model with it.
if st.session_state.messages[-1]["role"] != "assistant":
    user_question = st.session_state.messages[-1]["content"]
    with st.chat_message("assistant"):
        with st.spinner("Esperando respuesta..."):
            response = generate_openai_pinecone_response(user_question)
            st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})