Update app.py
Browse files
app.py
CHANGED
|
@@ -1,34 +1,29 @@
|
|
| 1 |
-
import
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
from llama_index import
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
import chainlit as cl
|
| 12 |
|
| 13 |
|
|
|
|
|
|
|
|
|
|
| 14 |
import openai
|
| 15 |
-
from llama_index import
|
|
|
|
|
|
|
| 16 |
from llama_index.node_parser import SimpleNodeParser
|
| 17 |
from llama_index.text_splitter import TokenTextSplitter
|
| 18 |
-
from llama_index.indices.vector_store import VectorStoreIndex
|
| 19 |
-
from llama_index.vector_stores import SupabaseVectorStore
|
| 20 |
-
from llama_index.llms import OpenAI
|
| 21 |
-
from llama_index import GPTVectorStoreIndex, StorageContext
|
| 22 |
-
from dotenv import load_dotenv
|
| 23 |
-
import os
|
| 24 |
-
from llama_index import get_response_synthesizer
|
| 25 |
-
from llama_index.vector_stores import DeepLakeVectorStore
|
| 26 |
-
|
| 27 |
|
| 28 |
-
|
|
|
|
|
|
|
| 29 |
|
| 30 |
-
# SECURITY: this revision previously committed a live OpenAI API key and an
# Activeloop token as string literals. Secrets must never be hard-coded in
# source control — the exposed credentials must be revoked/rotated.
# Supply both via the process environment (e.g. a .env file) instead.
os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "")
os.environ["ACTIVELOOP_TOKEN"] = os.environ.get("ACTIVELOOP_TOKEN", "")
|
| 32 |
|
| 33 |
|
| 34 |
dataset_path ="hub://cxcxxaaaaaz/text_embedding" # if we comment this out and don't pass the path then GPTDeepLakeIndex will create dataset in memory
|
|
@@ -37,16 +32,53 @@ from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document
|
|
| 37 |
from llama_index.vector_stores import DeepLakeVectorStore
|
| 38 |
|
| 39 |
# Create an index over the documents
|
| 40 |
-
vector_store = DeepLakeVectorStore(dataset_path=dataset_path
|
| 41 |
)
|
| 42 |
-
|
| 43 |
storage_context = StorageContext.from_defaults(vector_store=vector_store)
|
| 44 |
-
|
| 45 |
-
index = VectorStoreIndex.from_documents([], storage_context=storage_context)
|
| 46 |
|
| 47 |
|
| 48 |
|
| 49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
|
| 51 |
|
| 52 |
|
|
|
|
# --- Imports & credentials --------------------------------------------------
# Consolidated: the previous version imported SimpleDirectoryReader, Document,
# StorageContext, VectorStoreIndex, OpenAIEmbedding, ServiceContext,
# DeepLakeVectorStore and load_index_from_storage two or more times each.
# Every name below is now imported exactly once.
import os
import textwrap

import deeplake
import openai
from llama_index import (
    Document,
    PromptHelper,
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_graph_from_storage,
    load_index_from_storage,
    load_indices_from_storage,
)
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import OpenAI
from llama_index.node_parser import SimpleNodeParser
from llama_index.text_splitter import TokenTextSplitter
from llama_index.vector_stores import (
    DeepLakeVectorStore,
    PineconeVectorStore,
    QdrantVectorStore,
    SimpleVectorStore,
)
from transformers import BertTokenizerFast

# SECURITY: the previous revision hard-coded a live OpenAI key and an
# Activeloop token here. Those credentials are exposed in history and must be
# revoked/rotated. Read them from the environment instead; KeyError is
# deliberate so a missing secret fails fast at import time.
openai.api_key = os.environ["OPENAI_API_KEY"]
if not os.environ.get("ACTIVELOOP_TOKEN"):
    raise RuntimeError("ACTIVELOOP_TOKEN environment variable is not set")
# --- Deep Lake dataset & storage context ------------------------------------
# Path to the Deep Lake dataset on Activeloop hub. If dataset_path is omitted,
# DeepLakeVectorStore creates the dataset in memory instead of on the hub.
dataset_path = "hub://cxcxxaaaaaz/text_embedding"

# Create the vector store over the documents and wire it into a storage
# context so the index built below persists into the Deep Lake dataset.
# (The redundant re-import of DeepLakeVectorStore that lived here was dropped;
# it is already imported at the top of the file.)
vector_store = DeepLakeVectorStore(dataset_path=dataset_path)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# --- LLM / embedding / parsing configuration --------------------------------
llm = OpenAI(model='gpt-3.5-turbo', temperature=0, max_tokens=3924)
embed_model = OpenAIEmbedding()
node_parser = SimpleNodeParser(
    text_splitter=TokenTextSplitter(chunk_size=3924, chunk_overlap=10)
)
# NOTE(review): chunk_size_limit=20 looks inconsistent with the 3924-token
# splitter above — confirm this tiny limit is intentional.
prompt_helper = PromptHelper(
    context_window=4096,
    num_output=256,
    chunk_overlap_ratio=0.1,
    chunk_size_limit=20,
)

# Token accounting: count tokens with the same encoding gpt-3.5-turbo uses.
import tiktoken
from deeplake.core.vectorstore import VectorStore  # imported but unused here — confirm later code needs it
from llama_index.callbacks import CallbackManager, TokenCountingHandler

token_counter = TokenCountingHandler(
    tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
)
callback_manager = CallbackManager([token_counter])

service_context = ServiceContext.from_defaults(
    llm=llm,
    embed_model=embed_model,
    node_parser=node_parser,
    prompt_helper=prompt_helper,
    callback_manager=callback_manager,
)

from llama_index import set_global_service_context
# set_global_service_context(service_context)

# Build (or attach to) the index over the Deep Lake store. The vector store
# travels inside storage_context; the `vectorstore=` keyword the previous
# revision passed is not a parameter of VectorStoreIndex.from_documents and
# has been dropped.
index = VectorStoreIndex.from_documents(
    [],
    storage_context=storage_context,
    service_context=service_context,
    show_progress=True,
)