Spaces:
Build error
Create app.py
From codellama70B
app.py
ADDED
@@ -0,0 +1,40 @@
+import gradio as gr
+import pandas as pd
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+from rag_embeddings import RagRetriever  # assumed local module; no such package is published on PyPI
+
+# Load the RAG retriever used to pull relevant chunks out of the CSV
+rag_retriever = RagRetriever("rag_sequence")
+
+# Load the generator model and tokenizer
+# NOTE: this checkpoint ID does not appear to exist on the Hugging Face Hub
+tokenizer = AutoTokenizer.from_pretrained("EleutherAI/mistral-small-12L-4H-768d-albert")
+model = AutoModelForSeq2SeqLM.from_pretrained("EleutherAI/mistral-small-12L-4H-768d-albert")
+
+# Load the tabular data
+data = pd.read_csv("data.csv")
+# ADD DATASETS HERE
+
+def langchain(user_prompt, master_prompt):
+    # Retrieve the five most relevant data chunks using RAG embeddings
+    retrieved_data = rag_retriever.retrieve(user_prompt, data, num_results=5)
+
+    # Join the retrieved chunks onto the user prompt and master prompt
+    input_text = user_prompt + " " + master_prompt + " " + " ".join(retrieved_data)
+
+    # Generate a response with the Mistral model
+    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
+    generated_ids = model.generate(input_ids)
+    response = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+
+    return response
+
+iface = gr.Interface(
+    fn=langchain,
+    inputs=["text", "text"],
+    outputs="text",
+    title="LangChain App",
+    description="Retrieves relevant chunks from tabular CSV data via RAG embeddings, joins them with the user prompt and the master prompt, feeds the result to a locally run Mistral model from Hugging Face, and returns the response in the Gradio GUI.",
+)
+
+iface.launch()
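As committed, app.py cannot start: rag_embeddings is not a published package, and the EleutherAI/mistral-small-12L-4H-768d-albert checkpoint does not appear to exist on the Hugging Face Hub (Mistral is also a causal LM, not a seq2seq model). The sketch below shows one way the same retrieve-then-generate pipeline could run. The sentence-transformers retriever (all-MiniLM-L6-v2), the mistralai/Mistral-7B-Instruct-v0.1 generator, and the scheme of treating each CSV row as one chunk are all swapped-in assumptions, not part of the original commit.

import gradio as gr
import pandas as pd
from sentence_transformers import SentenceTransformer, util
from transformers import AutoTokenizer, AutoModelForCausalLM

# Embedding model for retrieval (assumption: any sentence-transformers model works here)
embedder = SentenceTransformer("all-MiniLM-L6-v2")

# A real Mistral checkpoint; it is a causal LM, so use AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")

# Embed each CSV row once, treating the joined cells as one text chunk
data = pd.read_csv("data.csv")
rows = data.astype(str).agg(" ".join, axis=1).tolist()
row_embeddings = embedder.encode(rows, convert_to_tensor=True)

def rag_app(user_prompt, master_prompt):
    # Cosine-similarity search for the five rows closest to the user prompt
    query = embedder.encode(user_prompt, convert_to_tensor=True)
    hits = util.semantic_search(query, row_embeddings, top_k=5)[0]
    context = " ".join(rows[hit["corpus_id"]] for hit in hits)

    # Prepend the master prompt and retrieved context to the user prompt
    input_text = f"{master_prompt}\n\nContext: {context}\n\nQuestion: {user_prompt}"
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    generated = model.generate(input_ids, max_new_tokens=256)

    # Decode only the newly generated tokens, not the echoed prompt
    return tokenizer.decode(generated[0][input_ids.shape[1]:], skip_special_tokens=True)

iface = gr.Interface(
    fn=rag_app,
    inputs=["text", "text"],
    outputs="text",
    title="LangChain App",
)
iface.launch()

For this to build on Spaces, requirements.txt would need gradio, pandas, sentence-transformers, transformers, and torch, and a 7B generator needs far more memory than the free CPU tier provides, so a smaller model may be the more realistic choice there.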