Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Importacion de Librerias
|
| 2 |
+
import os
|
| 3 |
+
import openai
|
| 4 |
+
from llama_index.core import SimpleDirectoryReader, ServiceContext, VectorStoreIndex
|
| 5 |
+
from llama_index.core import (
|
| 6 |
+
SimpleDirectoryReader,
|
| 7 |
+
VectorStoreIndex,
|
| 8 |
+
ServiceContext,
|
| 9 |
+
StorageContext,
|
| 10 |
+
Response,
|
| 11 |
+
Document,
|
| 12 |
+
load_index_from_storage
|
| 13 |
+
)
|
| 14 |
+
from llama_index.llms.openai import OpenAI
|
| 15 |
+
import gradio as gr
|
| 16 |
+
from gradio import components
|
| 17 |
+
import textwrap
|
| 18 |
+
import datetime
|
| 19 |
+
|
| 20 |
+
# API-key setup: set both the openai client attribute and the environment
# variable that llama_index's OpenAI wrapper reads.
# NOTE(review): if the 'openai_key' secret is unset, os.environ.get returns
# None and the os.environ assignment below raises TypeError — confirm the
# deployment always defines it.
openai.api_key = os.environ.get('openai_key')
os.environ["OPENAI_API_KEY"] = os.environ.get('openai_key')


# Load model
# SECURITY: exec() runs arbitrary Python taken from the 'storage_context'
# environment variable — whoever can set that variable fully controls this
# process. Keep the secret locked down, or replace with explicit code.
exec(os.environ.get('storage_context'))


### Begin context ###

# Load the vector index persisted in the working directory.
# NOTE(review): assumes the index files (docstore/vector store) were written
# next to app.py under index_id "vector_index" — verify against the persisted
# artifacts shipped with the Space.
storage_context = StorageContext.from_defaults(persist_dir = "./")
index = load_index_from_storage(storage_context, index_id="vector_index")
print("Se carg贸 el index.")


# Build the query engine: GPT-3.5-turbo at low temperature, with streaming
# enabled so responses can be yielded token-by-token to the UI.
llm=OpenAI(model="gpt-3.5-turbo", temperature=0.1)
query_engine = index.as_query_engine(llm=llm, streaming=True)

# Prefix prepended to every user question so answers come back in Spanish.
prompt = """Responder en espa帽ol: """

### End context ###
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
import gradio as gr
|
| 49 |
+
from gradio import components
|
| 50 |
+
import textwrap
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def responder(pregunta):
    """Stream the answer to *pregunta* as progressively longer strings.

    The Spanish-answer prompt is prepended to the question, which is then
    sent to the module-level streaming ``query_engine``.  Each yielded value
    is the full response accumulated so far, so a Gradio output component
    updates in place as tokens arrive.
    """
    flujo = query_engine.query(prompt + pregunta)

    # Collect chunks and emit the joined text after each one.
    acumulado = []
    for trozo in flujo.response_gen:
        acumulado.append(trozo)
        yield "".join(acumulado)
| 62 |
+
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# Define the Gradio user interface: a question box with Ask/Clear buttons
# on the left, the streamed answer on the right.
with gr.Blocks(theme='sudeepshouche/minimalist') as demo:

    gr.Markdown("""
    # Pharma.IA
    Realiza preguntas
    """)
    with gr.Row():
        with gr.Column():
            pregunta = gr.Text(label="Pregunta", placeholder='Escribe tu pregunta aqu铆...')

            with gr.Row():
                btn_send = gr.Button(value="Preguntar", variant="primary")
                clear = gr.Button(value="Limpiar")

            #gr.Examples(label="Ejemplos", examples=[""], inputs=[pregunta])

        with gr.Column():
            response = gr.Textbox(label="Respuesta")

    # Wire the Ask button to the streaming generator; each yielded partial
    # string replaces the contents of the response textbox.
    btn_send.click(responder, inputs=[pregunta], outputs=[response])
    # NOTE(review): Clear resets only the question field — the previous
    # answer stays visible in `response`; confirm that is intended.
    clear.click(lambda: None, None, pregunta, queue=False)

# Queueing is required for generator (streaming) event handlers.
demo.queue()
demo.launch()
|