Update app.py
app.py CHANGED
@@ -1,65 +1,87 @@
-
-from
-
-
from llama_index.core import Settings
-from langchain_community.embeddings import HuggingFaceEmbeddings
-from llama_index.core import ServiceContext
-# from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-
-documents = SimpleDirectoryReader('files').load_data()
-
-system_prompt="""
-You are a Q&A assistant. Your goal is to answer questions as
-accurately as possible based on the instructions and context provided.
-"""
-
-embed_model= HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
-
-llm = HuggingFaceLLM(
-    context_window=4096,
-    max_new_tokens=256,
-    generate_kwargs={"temperature": 0.1, "do_sample": True},
-    system_prompt=system_prompt,
-    tokenizer_name="anasmkh/new_customized_llama3.1_8b",
-    model_name="anasmkh/new_customized_llama3.1_8b",
-    device_map="auto",
-    model_kwargs={"torch_dtype": torch.float16 }
-)

-
-Settings.embed_model =embed_model
-# Settings.node_parser = SentenceSplitter(chunk_size=512, chunk_overlap=20)
-Settings.num_output = 250
-Settings.context_window = 3900

index = VectorStoreIndex.from_documents(
-    documents,
)

-query_engine = index.as_query_engine(

-
-    history = history or []
-    history.append({"role": "user", "content": message})
-    response=query_engine.query(message)
-    # response = generator(history)[-1]["generated_text"]
-    history.append({"role": "assistant", "content": response})
-    return history

-
-chatbot = gr.Chatbot()
-message = gr.Textbox()
-clear = gr.ClearButton([message, chatbot])

-
-clear.click(lambda: None, None, chatbot, queue=False)

-
+import os
+from getpass import getpass
+
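+# Prompt for the OpenAI API key at startup rather than hardcoding it.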
+os.environ["OPENAI_API_KEY"] = getpass("Enter your OpenAI API key: ")
+
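+# Global LlamaIndex defaults: OpenAI for both the LLM and the embeddings.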
+from llama_index.llms.openai import OpenAI
+from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
+Settings.llm = OpenAI(model="gpt-3.5-turbo",temperature=0.4)
+Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")
+
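+# Ingest every file in the source directory as LlamaIndex Documents.
+# NOTE: "/content/new_files" is a Colab-style path; adjust it for other hosts.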
+from llama_index.core import SimpleDirectoryReader
+
+documents = SimpleDirectoryReader("/content/new_files").load_data()
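+# In-memory Qdrant: the collection lives in RAM and is rebuilt on each start.
+# enable_hybrid=True also stores sparse vectors, so the collection can serve
+# hybrid (dense + keyword-style) retrieval.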
+from llama_index.core import VectorStoreIndex, StorageContext
+from llama_index.vector_stores.qdrant import QdrantVectorStore
+import qdrant_client

+client = qdrant_client.QdrantClient(
+    location=":memory:",
+)
+
+vector_store = QdrantVectorStore(
+    collection_name = "paper",
+    client=client,
+    enable_hybrid=True,
+    batch_size=20,
+)

+storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
+    documents,
+    storage_context=storage_context,
)
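+# Hybrid queries fuse dense-vector similarity with sparse keyword scores.
+# NOTE: the Gradio UI below talks to chat_engine; this query_engine is
+# defined but never called.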
+query_engine = index.as_query_engine(
+    vector_store_query_mode="hybrid"
+)
+
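+# Keep roughly the last 3,000 tokens of conversation; "context" chat mode
+# retrieves from the index on every turn and injects the hits into the prompt.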
+from llama_index.core.memory import ChatMemoryBuffer
+
+memory = ChatMemoryBuffer.from_defaults(token_limit=3000)
+
+chat_engine = index.as_chat_engine(
+    chat_mode="context",
+    memory=memory,
+    system_prompt=(
+        "You are an AI assistant who answers the user questions"
+    ),
+)
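+
+# Minimal Gradio front end. chat_with_ai returns the updated history plus an
+# empty string; the empty string clears the input textbox after each send.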
+import gradio as gr
+def chat_with_ai(user_input, chat_history):
+    response = chat_engine.chat(user_input)
+
+    chat_history = chat_history + [(user_input, str(response))]
+
+    return chat_history, ""
+
+
+def gradio_chatbot():
+    with gr.Blocks() as demo:
+        gr.Markdown("# Gradio Chat Interface for LlamaIndex")
+        chatbot = gr.Chatbot(label="LlamaIndex Chatbot")
+        user_input = gr.Textbox(
+            placeholder="Ask a question...", label="Enter your question"
+        )
+        submit_button = gr.Button("Send")

+        chat_history = gr.State([])

+        submit_button.click(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])

+        user_input.submit(chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input])

+    return demo

+gradio_chatbot().launch(debug=True)
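To run this version of app.py locally, the imports above imply at least the following packages; a minimal sketch, assuming current PyPI package names ("fastembed" is the sparse-embedding backend that QdrantVectorStore's hybrid mode typically expects):

pip install llama-index llama-index-vector-stores-qdrant qdrant-client gradio fastembed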