Update app.py
app.py
CHANGED

Old version of app.py (lines removed by this commit are prefixed with -):

@@ -1,10 +1,5 @@
-import gradio as gr
-from mistralai.client import MistralClient
-from mistralai.models.chat_completion import ChatMessage
-import requests
-import numpy as np
-import faiss
 import os
 from llama_index import VectorStoreIndex, SimpleDirectoryReader, SummaryIndex
 from llama_index.readers import SimpleWebPageReader
 from llama_index.llms import MistralAI
@@ -12,89 +7,53 @@ from llama_index.embeddings import MistralAIEmbedding
 from llama_index import ServiceContext
 from llama_index.query_engine import RetrieverQueryEngine

-
 description = "Exemple d'assistant avec Gradio et Mistral AI via son API"
 placeholder = "Posez moi une question sur l'agriculture"
 placeholder_url = "Donner moi une url qui va servir de contexte agricole complémentaire"
 examples = ["Comment fait on pour produire du maïs ?", "Rédige moi une lettre pour faire un stage dans une exploitation agricole", "Comment reprendre une exploitation agricole ?"]

-
-client = MistralClient(api_key=api_key)
-chat_model = 'mistral-small'

-
-embed_model = MistralAIEmbedding(model_name='mistral-embed', api_key=api_key)
-service_context = ServiceContext.from_defaults(chunk_size=512, llm=llm, embed_model=embed_model)

-
-
-
-
-
-
-query_engine = index.as_query_engine(similarity_top_k=2)
-# get document source
-# response = requests.get(url)
-# text = response.text
-# print(text)
-# chunk_size = 512
-# split in chunks
-# chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
-# embbed in Mistral to have vectors
-# text_embeddings = np.array([get_text_embedding(chunk) for chunk in chunks])
-# print(text_embeddings)
-# d = text_embeddings.shape[1]
-documents
-

-
-
-    embeddings_batch_response = client.embeddings(
-        model="mistral-embed",
-        input=input
-    )
-    return embeddings_batch_response.data[0].embedding
-
-# build a prompt
-def build_prompt(user_input):
-    retrieved_chunk = query_engine.query(user_input)
-    prompt = f"""
-    Context information is below.
-    ---------------------
-    {retrieved_chunk}
-    ---------------------
-    Given the context information and not prior knowledge, answer the query.
-    Query: {user_input}
-    Answer:
-    """
-
-def chat_with_mistral(user_input, history):
-    prompt = build_prompt(user_input)
-    messages = [ChatMessage(role="user", content=prompt)]

-
-
-
-

-

-    with gr.Row():
-        gr.Markdown("#Mixtral-8x7B Playground Space!")
     with gr.Row():
         url_msg = gr.Textbox(placeholder=placeholder_url, container=False, scale=7)
-        url_btn = gr.Button(value="
-
-
-
-    with gr.Row():
-        chatbot=gr.Chatbot(height=300)
-    with gr.Row():
-        msg = gr.Textbox(placeholder=placeholder, container=False, scale=7)
-        msg_btn = gr.Button("Envoyer")

-

-

-
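
Several removed lines did not survive in this view; in particular, the helper that wrapped Mistral's embeddings endpoint lost its signature, leaving only the `embeddings_batch_response` block above. The sketch below is a hypothetical reconstruction, not the committed code: the function name is taken from the commented-out call `get_text_embedding(chunk)`, and reading the key from the environment is an assumption (the removed file referenced an `api_key` variable it never defined in this diff).

import os
from mistralai.client import MistralClient

# Assumption: the old version never shows where api_key came from, so read it from the environment.
client = MistralClient(api_key=os.environ["MISTRAL_API_KEY"])

# Hypothetical reconstruction of the removed helper: only its body appears in the diff above.
def get_text_embedding(input):
    # Embed one chunk of text with Mistral's embedding model and return the raw vector.
    embeddings_batch_response = client.embeddings(
        model="mistral-embed",
        input=input,
    )
    return embeddings_batch_response.data[0].embedding

This matches the manual pipeline sketched in the commented-out lines (fetch the page with requests, split it into 512-character chunks, embed each chunk), which the new version replaces with llama_index.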
New version of app.py (lines added by this commit are prefixed with +):

 import os
+import gradio as gr
 from llama_index import VectorStoreIndex, SimpleDirectoryReader, SummaryIndex
 from llama_index.readers import SimpleWebPageReader
 from llama_index.llms import MistralAI
 from llama_index.embeddings import MistralAIEmbedding
 from llama_index import ServiceContext
 from llama_index.query_engine import RetrieverQueryEngine

+
+title = "Gaia Mistral Chat Demo"
 description = "Exemple d'assistant avec Gradio et Mistral AI via son API"
 placeholder = "Posez moi une question sur l'agriculture"
 placeholder_url = "Donner moi une url qui va servir de contexte agricole complémentaire"
+placeholder_api_key = "API key"
 examples = ["Comment fait on pour produire du maïs ?", "Rédige moi une lettre pour faire un stage dans une exploitation agricole", "Comment reprendre une exploitation agricole ?"]

+query_engine = None

+with gr.Blocks() as demo:

+    gr.Markdown(""" ### Welcome to Level 2 Demo
+    Add an url and your API key at the bottom of the bottom before interacting with the Chat. This demo allows you to interact with a webpage and then ask questions to Mistral APIs. Mistral will answer with the context extracted from the webpage.
+    """)
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox()
+    clear = gr.ClearButton([msg, chatbot])

+    with gr.Row():
+        api_key_text_box = gr.Textbox(placeholder=placeholder_api_key, container=False, scale=7)

+    def setup_with_url(url, api_key):
+        global query_engine
+        # Set-up clients
+        llm = MistralAI(api_key=api_key,model="mistral-medium")
+        embed_model = MistralAIEmbedding(model_name='mistral-embed', api_key=api_key)
+        service_context = ServiceContext.from_defaults(chunk_size=1024, llm=llm, embed_model=embed_model)

+        # Set-up db
+        documents = SimpleWebPageReader(html_to_text=True).load_data([url])
+        index = VectorStoreIndex.from_documents(documents, service_context=service_context)
+        query_engine = index.as_query_engine(similarity_top_k=15)
+        return "I'm ready, please add a question here."

     with gr.Row():
         url_msg = gr.Textbox(placeholder=placeholder_url, container=False, scale=7)
+        url_btn = gr.Button(value="Set-up API and process url ✅", interactive=True)
+        url_btn.click(setup_with_url, [url_msg, api_key_text_box], msg, show_progress= "full")
+
+

+    def respond(message, chat_history):
+        response = query_engine.query(message)
+        chat_history.append((message, str(response)))
+        return chat_history

+    msg.submit(respond, [msg, chatbot], [chatbot])

+demo.launch()
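
Outside the Gradio UI, the retrieval path that this commit introduces can be exercised on its own. The sketch below is a minimal reconstruction, not part of the commit: it assumes the pre-0.10 llama-index import layout used above, a `MISTRAL_API_KEY` environment variable standing in for the key the Space asks for in its textbox, and a placeholder URL chosen by the reader.

import os

from llama_index import ServiceContext, VectorStoreIndex
from llama_index.embeddings import MistralAIEmbedding
from llama_index.llms import MistralAI
from llama_index.readers import SimpleWebPageReader

# Assumptions for this sketch: the key comes from the environment and the URL is only an example.
api_key = os.environ["MISTRAL_API_KEY"]
url = "https://example.com/une-page-sur-l-agriculture"

# Same wiring as setup_with_url() in the new app.py: one chat model, one embedding
# model, and a ServiceContext that chunks the fetched page before indexing it.
llm = MistralAI(api_key=api_key, model="mistral-medium")
embed_model = MistralAIEmbedding(model_name="mistral-embed", api_key=api_key)
service_context = ServiceContext.from_defaults(chunk_size=1024, llm=llm, embed_model=embed_model)

# Fetch the page as text, build the vector index, and ask one of the example questions.
documents = SimpleWebPageReader(html_to_text=True).load_data([url])
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
query_engine = index.as_query_engine(similarity_top_k=15)

print(query_engine.query("Comment fait on pour produire du maïs ?"))

The chatbot's respond() callback performs exactly this last query step once setup_with_url() has built the index.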