Benjov commited on
Commit
c6a2567
·
1 Parent(s): 3970381

29/10/2023

Browse files
Files changed (3) hide show
  1. .DS_Store +0 -0
  2. app.py +297 -67
  3. requirements.txt +6 -8
.DS_Store ADDED
Binary file (6.15 kB). View file
 
app.py CHANGED
@@ -1,91 +1,321 @@
 
 
 
1
  import os
 
 
 
2
  import pandas as pd
 
3
  import openai
4
- import re
5
- import json
 
 
 
 
 
 
6
  from langchain.chat_models import ChatOpenAI
7
- import regex
8
- import gradio as gr
 
 
 
 
 
 
9
  from langchain.schema import Document
10
- from langchain.text_splitter import RecursiveCharacterTextSplitter
11
- from langchain.embeddings.openai import OpenAIEmbeddings
12
- from openai.embeddings_utils import get_embedding
13
- from openai.embeddings_utils import cosine_similarity
14
- import gspread # See: https://docs.gspread.org/en/v5.10.0/user-guide.html
15
- from oauth2client.service_account import ServiceAccountCredentials
16
- from datetime import datetime
17
-
18
- # API de OpenAI
 
19
  openai.api_key = os.getenv("OPENAI_API_KEY")
 
 
 
 
20
 
21
- # Establece las credenciales y la API
 
 
 
 
 
 
 
22
 
23
- #credentials = os.getenv( "credentials" )
 
 
 
 
24
 
25
- #credentials = json.loads( credentials )
 
 
 
 
26
 
27
- #gc = gspread.service_account_from_dict( credentials )
 
 
 
 
28
 
29
- #Google_URL = os.getenv( "Google_URL" )
 
 
 
 
30
 
31
- # Cargar el archivo
32
- df_Expediente = pd.read_csv( os.getenv( "Data" ) )
 
 
 
33
 
34
- # Main OpenAI Function
35
  #
36
- # Function: Get embeddings
 
 
 
37
 
38
- def get_embedding( text , model ):
39
- text = text.replace("\n", " ")
40
- return openai.Embedding.create( input = [text], model = model )['data'][0]['embedding']
 
 
41
 
42
- # Función de búsqueda
 
 
 
 
43
 
44
- def buscar( busqueda, datos, n_resultados):
45
- #
46
- busqueda_embed = get_embedding( busqueda, "text-embedding-ada-002" )
47
- #
48
- datos['Similitud'] = datos['Embedding'].apply( lambda x: cosine_similarity( eval( x ) , busqueda_embed ) )
49
- #
50
- datos = datos.sort_values('Similitud', ascending = False )
51
- #
52
- return datos.iloc[:n_resultados][['Documento', 'Pagina', 'Textos', 'NumPalabras', 'Embedding', 'Similitud']]
53
 
54
  #
55
- def Chat( user_message_1 ):
56
- #
57
- df_final = buscar( user_message_1, df_Expediente, 20).reset_index( drop = True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  #
59
- Textos = df_final[ ['Documento', 'Pagina', 'Textos'] ]
 
 
 
 
 
60
  #
61
- Textos_Concatenados = '\n\n\n'.join( Textos.apply( lambda row: ' | '.join( row ), axis = 1) )
 
 
 
 
 
 
 
 
62
  #
63
- # Save Question and date time
64
- #update_records( user_message_1 )
65
- return Textos_Concatenados
66
- #
67
- #
68
- with gr.Blocks() as demo:
 
69
  #
70
- gr.Markdown("App basada en servicios (Embeddings) de OpenAI (Chat GPT-3.5)")
71
- gr.Markdown("Este es un producto de prueba desarrollado por GAMES Economics (https://gamesecon.com/)")
72
- gr.Markdown("Contacto: boliva@gamesecon.com")
73
- gr.Markdown("Este es un information retrieval system o sistema de recuperación de información.")
74
- gr.Markdown("Este tipo de herramientas son un proceso para obtener información relevante \
75
- para una necesidad de información a partir de una colección de textos.")
76
- gr.Markdown("Las búsquedas pueden basarse en un texto completo, una frase o una serie de conceptos.")
77
- gr.Markdown("No obstante, cuanto más detallada sea la sentencia buscada, la recuperación de \
78
- información será más precisa.")
79
- gr.Markdown("En alguna medida, los motores de búsqueda web (como Google) son ejemplos de estos sistemas.")
80
- busqueda = gr.Textbox(label = "Escribe la pregunta, tema o enunciado.")
81
- greet_btn = gr.Button("Preguntar")
82
- # Crear dos widgets de salida en lugar de uno
83
- output1 = gr.Textbox(label = "El Top 20 de los extractos de documentos que coinciden con la consulta son:")
84
-
85
- # Modificar la función click para asignar cada parte de la tupla a un widget diferente
86
- greet_btn.click(fn = Chat, inputs=[busqueda], outputs=[output1])
87
  #
88
- #demo.launch( share = True )
89
- demo.launch( )
90
- #
91
- #
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #--------------------------------------------------------------------
2
+ # DEPENDENCIAS
3
+ #--------------------------------------------------------------------
4
  import os
5
+ from io import StringIO
6
+ import requests
7
+ import gradio as gr
8
  import pandas as pd
9
+ import numpy as np
10
  import openai
11
+ import tiktoken
12
+ #import streamlit as st
13
+ from openai.embeddings_utils import get_embedding, cosine_similarity
14
+ #from langchain.document_loaders import PyPDFLoader
15
+ #from langchain.text_splitter import CharacterTextSplitter
16
+ #from PyPDF2 import PdfReader, PdfFileReader
17
+ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
18
+ from langchain.vectorstores import FAISS
19
  from langchain.chat_models import ChatOpenAI
20
+ from langchain.memory import ConversationBufferMemory
21
+ from langchain.chains import ConversationalRetrievalChain
22
+ from langchain.llms import OpenAI, HuggingFaceHub
23
+ from langchain.chains.question_answering import load_qa_chain
24
+ #from htmlTemplates import css, bot_template, user_template
25
+
26
+ #import ast
27
+ #from langchain.schema.vectorstore import Document
28
  from langchain.schema import Document
29
+ #import fitz # PyMuPDF
30
+ #import pytesseract
31
+ #from PIL import Image
32
+ #from io import BytesIO
33
+ #import cv2
34
+
35
+
36
#--------------------------------------------------------------------
# KEYS / CREDENTIALS
#--------------------------------------------------------------------
# The OpenAI key is read twice on purpose: once to configure the global
# openai client, once as a plain variable handed to the LangChain LLM below.
api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = os.getenv("OPENAI_API_KEY")
# GitHub personal-access token used to fetch raw CSV files via the API.
token = os.getenv("token")
headers = {
    'Authorization': f'token {token}',
    'Accept': 'application/vnd.github.v3.raw',
}
44
 
45
#--------------------------------------------------------------------
# LOAD EMBEDDING CSVs (fetched from a private GitHub repo via the API)
#--------------------------------------------------------------------
_GITHUB_BASE = 'https://api.github.com/repos/benjov/Data_Text_WM/contents/df/pages'


def _fetch_csv(url):
    """Download one CSV through the GitHub contents API and return a DataFrame.

    Uses the module-level `headers` (token auth + raw-content accept header).
    Raises requests.HTTPError on a failed download instead of letting
    pd.read_csv choke on an HTML/JSON error body later.
    """
    response = requests.get(url, headers=headers)
    response.raise_for_status()
    return pd.read_csv(StringIO(response.text))


# Confidential tomes cited in the DPR (URL kept secret via env var).
tomos_conf_DPR = _fetch_csv(os.getenv("url_tomos_conf_DPR"))

# Confidential tomes (folios cited in the DPR).
tomos_conf_cita = _fetch_csv(
    f'{_GITHUB_BASE}/04.%20Tomos%20Confidenciales%20WM%20(folios%20citados%20en%20DPR)/tomos_conf_cita.csv'
)

# Reserved tomes 1-28 were split into nine CSV shards; fetch them in order
# and stack them into a single frame (same result as the original chained
# pd.concat(..., ignore_index=True) calls).
df_tomos_1a28 = pd.concat(
    [
        _fetch_csv(f'{_GITHUB_BASE}/01.%20Tomos%20Reservados/df_tomos_1a28_{i:02d}.csv')
        for i in range(1, 10)
    ],
    ignore_index=True,
)
123
+
124
#---------------------------------------------------------------------------------------------------------------
# The 'Embedding' column arrives as text, which cosine_similarity cannot use.. [tomos_conf_DPR, tomos_conf_cita]
#---------------------------------------------------------------------------------------------------------------
def clean_and_parse_embedding(embedding_str):
    """Parse an embedding serialized as a string into a list of floats.

    Keeps only the text between the first '[' and the following ']'
    (some exports wrap the vector in extra prefix/suffix text), then
    evaluates it as a Python literal.

    Fixes two defects of the original:
    - `import ast` was commented out at the top of the file, so this
      raised NameError at runtime; imported locally here.
    - a single-element vector made literal_eval return a bare float,
      which then crashed the list comprehension; normalized to a tuple.
    """
    import ast  # local import: the top-level `import ast` is commented out in this file

    inner = embedding_str.split('[')[-1].split(']')[0]
    parsed = ast.literal_eval(inner)
    if isinstance(parsed, (int, float)):
        parsed = (parsed,)
    return [float(val) for val in parsed]
131
+
132
# Convert the serialized embeddings in-place so cosine_similarity can use them.
for _frame in (tomos_conf_DPR, tomos_conf_cita):
    _frame['Embedding'] = _frame['Embedding'].apply(clean_and_parse_embedding)
134
+
135
#---------------------------------------------------------------------------------------------------------------
# The 'Embedding' column arrives as text, which cosine_similarity cannot use.. [df_tomos_1a28]
#---------------------------------------------------------------------------------------------------------------
def parse_embedding(embedding_str):
    """Parse an embedding serialized as a Python-list string into list[float].

    Fixes the original's NameError: `import ast` was commented out at the
    top of the file, so ast.literal_eval was unresolved at runtime.
    """
    import ast  # local import: the top-level `import ast` is commented out in this file

    return [float(val) for val in ast.literal_eval(embedding_str)]
141
+
142
# Parse the reserved-tomes embeddings as well.
df_tomos_1a28['Embedding'] = df_tomos_1a28['Embedding'].map(parse_embedding)

#---------------------------------------------------------------------------------------------------------------
# LIST OF DATAFRAMES TO SEARCH
#---------------------------------------------------------------------------------------------------------------
list_of_dfs = [tomos_conf_DPR, tomos_conf_cita, df_tomos_1a28]
148
+
149
#--------------------------------------------------------------------
# ASK A QUESTION AND RANK THE CHUNKS
#--------------------------------------------------------------------
def buscar(busqueda, lista_de_datos, n_resultados=20):
    """Rank every page chunk in *lista_de_datos* against the query.

    Parameters
    ----------
    busqueda : str
        Free-text query; embedded with text-embedding-ada-002.
    lista_de_datos : list of pd.DataFrame
        Frames holding an 'Embedding' column (list[float]) plus
        'PDFName', 'PageNumber', 'PageText', 'Folder'.
    n_resultados : int, optional
        How many top matches to return (default 20, the original
        hard-coded value — callers passing two args are unaffected).

    Returns a DataFrame of the n_resultados most similar chunks,
    sorted by descending cosine similarity.

    Changes vs. the original: works on copies via .assign() so the
    shared module-level frames are no longer mutated, and the redundant
    per-frame sort before the combined sort was dropped (the final
    sort alone determines the result).
    """
    busqueda_embed = get_embedding(busqueda, engine="text-embedding-ada-002")
    resultados = []
    for datos in lista_de_datos:
        # .assign returns a copy — the caller's frame is left untouched.
        scored = datos.assign(
            similitud=datos['Embedding'].apply(lambda x: cosine_similarity(x, busqueda_embed))
        )
        resultados.append(scored[['PDFName', 'PageNumber', 'similitud', 'PageText', 'Folder']])
    combined = pd.concat(resultados).sort_values("similitud", ascending=False)
    return combined.head(n_resultados)
164
+
165
#--------------------------------------------------------------------
# Ranking used for the AI answer (top 10 instead of top 20)
#--------------------------------------------------------------------
def buscar_ai(busqueda, lista_de_datos):
    """Return the 10 chunks most similar to *busqueda* across all frames."""
    query_vector = get_embedding(busqueda, engine="text-embedding-ada-002")
    parciales = []
    for frame in lista_de_datos:
        # Score each row; like the original, this writes the column onto
        # the shared frame rather than a copy.
        frame["similitud"] = frame['Embedding'].apply(lambda emb: cosine_similarity(emb, query_vector))
        ordenado = frame.sort_values("similitud", ascending=False)
        parciales.append(ordenado[['PDFName', 'PageNumber', 'similitud', "PageText", "Folder"]])
    # Merge the per-frame rankings and keep the global top 10.
    todos = pd.concat(parciales)
    return todos.sort_values("similitud", ascending=False).head(10)
180
+
181
#--------------------------------------------------------------------
# Summarize which PDFs/pages the answer was extracted from
#--------------------------------------------------------------------
def count_text_extracted(pregunta):
    """Report, per (folder, PDF), how many matching pages the query hit."""
    hits = buscar(pregunta, list_of_dfs)
    per_pdf = hits.groupby(['Folder', 'PDFName'])['PageNumber'].count().reset_index()

    partes = []
    for _, fila in per_pdf.iterrows():
        carpeta = fila['Folder']
        archivo = fila['PDFName']
        veces = fila['PageNumber']
        mask = (hits['PDFName'] == archivo) & (hits['Folder'] == carpeta)
        paginas = ', '.join(map(str, hits[mask]['PageNumber'].tolist()))
        partes.append(
            f"Usé el archivo '{archivo}' del folder '{carpeta}' {veces} (vez/veces) al extraer el texto de las páginas {paginas}.\n\n"
        )
    return ''.join(partes)
198
#--------------------------------------------------------------------
# Render the full text of every matching page
#--------------------------------------------------------------------
def print_pdf_info(pregunta):
    """Return the extracted text of each matching page, labeled per PDF."""
    matches = buscar(pregunta, list_of_dfs)

    bloques = []
    for _, fila in matches.iterrows():
        nombre = fila['PDFName']
        pagina = fila['PageNumber']
        texto = fila['PageText']
        # Indent every line of the page text with a tab for readability.
        texto_indentado = '\n'.join('\t' + linea for linea in texto.split('\n'))
        bloques.append(
            f'De "{nombre}":\n \tPágina {pagina}:\n\t ------------------------------------------------------------------------------------------------------------------------------------\n{texto_indentado}\n------------------------------------------------------------------------------------------------------------------------------------\n'
        )
    return ''.join(bloques)
219
#--------------------------------------------------------------------
# DataFrame rows -> LangChain Document objects
#-------------------------------------------------------------------
def vector_document(dataframe):
    """Wrap each 'PageText' entry in a Document whose metadata id is the row position."""
    documents = []
    for idx, texto in enumerate(dataframe["PageText"]):
        documents.append(Document(page_content=texto, metadata={'id': idx}))
    return documents
226
+
227
#--------------------------------------------------------------------
# Thin wrapper around the OpenAI chat-completion endpoint
#-------------------------------------------------------------------
def get_completion_from_messages( messages, model = "gpt-3.5-turbo-16k",
                                  temperature = 0, max_tokens = 4500 ):  ##Check max_tokens
    """Send *messages* to the chat API and return the assistant's reply text."""
    respuesta = openai.ChatCompletion.create(
        model = model,
        messages = messages,
        temperature = temperature,
        max_tokens = max_tokens,
    )
    # The API returns a list of choices; only the first one is used.
    return respuesta.choices[0].message["content"]
239
+
240
def get_topic( user_message ):
    """Get a context-free completion for *user_message*.

    The system prompt frames the model as a Mexican competition-law
    attorney and announces the #### delimiters around the user query.
    Returns the raw completion text.

    Fix vs. original: corrected the typo "intenarás" -> "intentarás"
    in the system prompt.
    """
    delimiter = "####"
    system_message = f"""
Eres un abogado que trabaja en temas de competencia económica e investiga casos en México.
Siempre intentarás responder en el mayor número posible de palabras.
Las consultas o preguntas se delimitarán con los caracteres {delimiter}
"""
    messages = [
        {'role': 'system',
         'content': system_message},
        {'role': 'user',
         'content': f"{delimiter}{user_message}{delimiter}"},
    ]
    return get_completion_from_messages( messages )
256
+
257
def get_respuesta( user_message, informacion):
    """Answer *user_message* using ONLY the text extracts in *informacion*.

    *informacion* is interpolated into the user prompt as the exclusive
    evidence list; the model is instructed to answer at length from it.
    Returns the raw completion text.

    Fix vs. original: corrected the typo "intenarás" -> "intentarás"
    in the system prompt.
    """
    delimiter = "####"
    system_message = f"""
Eres un abogado que trabaja en temas de competencia económica e investiga casos en México.
Siempre intentarás responder en el mayor número posible de palabras.
Las consultas o preguntas se delimitarán con los caracteres {delimiter}

"""
    user_content = f"""
{delimiter}
Estás intentando recopilar información relevante para tu caso.
Usa exclusivamente la información contenida en la siguiente lista:
{informacion}

para responder sin límite de palabras lo siguiente: {user_message}
Responde de forma detallada.
{delimiter}
"""
    messages = [
        {'role': 'system',
         'content': system_message},
        {'role': 'user',
         'content': user_content},
    ]
    return get_completion_from_messages(messages)
284
+
285
def chat(user_message_1):
    """End-to-end QA: rank the top-10 chunks for the question, answer from them.

    Fix vs. original: the original also called get_topic() here, then
    self-assigned the result and appended 'Todos' — but never used it.
    That dead code cost one extra paid OpenAI completion per question
    and has been removed; the returned answer is unchanged.
    """
    top_chunks = buscar_ai(user_message_1, list_of_dfs)
    lista_info = top_chunks['PageText'].tolist()
    return get_respuesta(user_message_1, lista_info)
292
+
293
+
294
#.....................................................................
#--------------------------------------------------------------------
# ASK A QUESTION AND GENERATE AN ANSWER VIA A LANGCHAIN QA CHAIN
#--------------------------------------------------------------------
#.....................................................................
llm_mio = OpenAI(openai_api_key=api_key, model_name="gpt-3.5-turbo-16k")

# Same top-20 ranking as above, fed into a 'stuff' QA chain.
def load(query):
    """Rank the top-20 chunks for *query* and answer with a 'stuff' QA chain."""
    ranked = buscar(query, list_of_dfs)
    docs = vector_document(ranked)
    qa_chain = load_qa_chain(llm=llm_mio, chain_type="stuff")
    return qa_chain.run(input_documents=docs, question=query)
308
+
309
with gr.Blocks() as demo:
    # Widget creation order defines the page layout — kept as in the original:
    # question box, button, then the three output boxes.
    pregunta_box = gr.Textbox(label="Pregunta", lines=2)
    boton = gr.Button(value="Listo")
    donde_box = gr.Textbox(value="", label="Donde:")
    cuales_box = gr.Textbox(value="", label="Cuales:")
    respuesta_box = gr.Textbox(value="", label="Respuesta IA:")
    # One click fans out to three independent handlers, one per output.
    boton.click(load, inputs=[pregunta_box], outputs=[respuesta_box])
    boton.click(count_text_extracted, inputs=[pregunta_box], outputs=[donde_box])
    boton.click(print_pdf_info, inputs=[pregunta_box], outputs=[cuales_box])


if __name__ == "__main__":
    demo.launch()
requirements.txt CHANGED
@@ -1,11 +1,9 @@
 
1
  gradio==3.27.0
2
  gradio-client==0.1.3
3
- gspread==5.10.0
4
- langchain==0.0.163
5
- oauth2client==4.1.3
6
- openai==0.27.4
7
  pandas==2.0.3
8
- plotly==5.10.0
9
- regex==2023.3.23
10
- scikit-learn==1.1.3
11
- scipy==1.9.3
 
 
1
+ requests==2.28.1
2
  gradio==3.27.0
3
  gradio-client==0.1.3
 
 
 
 
4
  pandas==2.0.3
5
+ numpy==1.25.2
6
+ openai==0.27.4
7
+ tiktoken==0.4.0
8
+ langchain==0.0.163
9
+