Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,38 +1,69 @@
-from langchain.document_loaders import TextLoader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.embeddings import OpenAIEmbeddings
-from langchain.vectorstores import FAISS
-from langchain.llms import OpenAI
-from langchain.chains import ConversationalRetrievalChain
-
 import gradio as gr
-import os
-
-#os.environ["OPENAI_API_KEY"] = "your-openai-key"
-
-qa_chain = ConversationalRetrievalChain.from_llm(
-    llm=OpenAI(),
-    retriever=vectorstore.as_retriever()
-)
-
-chat_history = []
-
-def chat(user_input):
-    global chat_history
-    result = qa_chain({"question": user_input, "chat_history": chat_history})
-    chat_history.append((user_input, result["answer"]))
-    return chat_history
+from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel
+import torch
+import faiss
+import numpy as np
+
+# --- Load TinyLlama Chat Model ---
+llama_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+llama_tokenizer = AutoTokenizer.from_pretrained(llama_id)
+llama_model = AutoModelForCausalLM.from_pretrained(llama_id)
+
+# --- Load Nomic Embedding Model ---
+nomic_id = "nomic-ai/nomic-embed-text-v1"
+nomic_tokenizer = AutoTokenizer.from_pretrained(nomic_id)
+# Nomic's custom architecture is not in transformers proper, so loading it
+# through AutoModel requires trust_remote_code=True.
+nomic_model = AutoModel.from_pretrained(nomic_id, trust_remote_code=True)
+
+# --- FAISS Setup ---
+doc_store = []    # raw document strings, parallel to the vectors in the index
+doc_index = None  # faiss.IndexFlatL2, created lazily on first upload
+
+def embed(texts):
+    # One vector per text: mean-pool the last hidden state over all positions.
+    inputs = nomic_tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
+    with torch.no_grad():
+        outputs = nomic_model(**inputs)
+    embeddings = outputs.last_hidden_state.mean(dim=1).cpu().numpy()
+    return embeddings
+
+def add_documents(text_block):
+    global doc_index, doc_store
+    docs = [line.strip() for line in text_block.split("\n") if line.strip()]
+    vectors = embed(docs)
+
+    doc_store.extend(docs)
+
+    if doc_index is None:
+        doc_index = faiss.IndexFlatL2(vectors.shape[1])
+    doc_index.add(vectors)
+
+    return f"Added {len(docs)} documents."
+
+def chat_with_tinyllama(question):
+    if doc_index is None or len(doc_store) == 0:
+        context = "No documents uploaded yet."
+    else:
+        # Retrieve the single nearest document (k=1) as context.
+        q_embed = embed([question])
+        D, I = doc_index.search(q_embed, 1)
+        context = doc_store[I[0][0]]
+
+    prompt = f"<|system|> You are a helpful assistant.\n<|user|> Context: {context}\nQuestion: {question}\n<|assistant|>"
+    input_ids = llama_tokenizer(prompt, return_tensors="pt").input_ids
+    output = llama_model.generate(input_ids, max_new_tokens=100, do_sample=True)
+    answer = llama_tokenizer.decode(output[0], skip_special_tokens=True)
+    return answer.split("<|assistant|>")[-1].strip()
+
+# --- Gradio Interface ---
+with gr.Blocks() as demo:
+    gr.Markdown("# 🤖 TinyLlama + Nomic Chatbot")
+    with gr.Row():
+        doc_input = gr.Textbox(lines=5, label="Upload Documents (one per line)")
+        upload_btn = gr.Button("Add to Knowledge Base")
+
+    with gr.Row():
+        question = gr.Textbox(lines=1, label="Ask a Question")
+        response = gr.Textbox(lines=4, label="TinyLlama Response")
+
+    # The status message replaces the upload box; the answer fills the response box.
+    upload_btn.click(add_documents, inputs=doc_input, outputs=doc_input)
+    question.submit(chat_with_tinyllama, inputs=question, outputs=response)
+
+demo.launch()
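Two details of the new `embed()` are worth flagging: nomic-embed-text-v1 is trained with task prefixes (`search_document:` for stored text, `search_query:` for queries) and with mean pooling that masks out padding, whereas the code above pools every position of unprefixed input. A minimal sketch of a prefix- and mask-aware variant, assuming the same `nomic_tokenizer` and `nomic_model` globals as in app.py:

```python
# Sketch only: assumes nomic_tokenizer / nomic_model are loaded as in app.py.
def embed(texts, prefix="search_document: "):
    # Nomic's model card recommends "search_document: " for corpus text
    # and "search_query: " for incoming questions.
    inputs = nomic_tokenizer([prefix + t for t in texts],
                             return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        out = nomic_model(**inputs)
    mask = inputs["attention_mask"].unsqueeze(-1).float()  # (batch, seq, 1)
    summed = (out.last_hidden_state * mask).sum(dim=1)     # ignore padding tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)
    return (summed / counts).cpu().numpy()
```

`chat_with_tinyllama` would then call `embed([question], prefix="search_query: ")`.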
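The hand-built prompt string also only approximates TinyLlama's Zephyr-style chat format (it drops the newlines and `</s>` turn separators the model was tuned on). The tokenizer ships the exact template, so a safer construction is a sketch like the following, reusing `context` and `question` from `chat_with_tinyllama`:

```python
# Sketch: let the tokenizer's bundled chat template format the prompt.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": f"Context: {context}\nQuestion: {question}"},
]
prompt = llama_tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
```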
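Finally, this commit changes only app.py while the Space still reports "Runtime error"; a missing dependency is a likely cause. A plausible requirements.txt for the new imports (this file is not part of the commit and the pins are omitted; `einops` is the extra package nomic's remote code is known to need):

```text
gradio
transformers
torch
faiss-cpu
numpy
einops
```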