AXZ91 committed on
Commit
11b8ca3
·
1 Parent(s): 1005a13

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -6
app.py CHANGED
@@ -3,7 +3,6 @@ import textwrap
3
 
4
  from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document
5
  from llama_index.vector_stores import DeepLakeVectorStore
6
- import deeplake
7
  from langchain.chat_models import ChatOpenAI
8
  import chainlit as cl
9
  os.environ["OPENAI_API_KEY"] = 'REDACTED-LEAKED-KEY'  # SECURITY(review): a real OpenAI secret key was committed here in plain text — revoke it immediately and load the key from the environment or a secrets manager, never hard-code it
@@ -27,7 +26,7 @@ from llama_index import load_index_from_storage, load_indices_from_storage, load
27
 
28
 
29
 
30
- dataset_path ="hub://cxcxxaaaaaz/text_embedding" # if we comment this out and don't pass the path then GPTDeepLakeIndex will create dataset in memory
31
  from llama_index.storage.storage_context import StorageContext
32
  from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document
33
  from llama_index.vector_stores import ChromaVectorStore
@@ -69,10 +68,10 @@ node_parser = SimpleNodeParser(
69
  )
70
 
71
  prompt_helper = PromptHelper(
72
- context_window=4096,
73
  num_output=256,
74
  chunk_overlap_ratio=0.1,
75
- chunk_size_limit=20
76
  )
77
 
78
  import tiktoken
@@ -93,9 +92,8 @@ service_context = ServiceContext.from_defaults(
93
  from llama_index import set_global_service_context
94
 
95
 
96
- index = VectorStoreIndex.from_documents([], vectorstore=vector_store, storage_context=storage_context, service_context=service_context)
97
-
98
 
 
99
 
100
 
101
 
 
3
 
4
  from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document
5
  from llama_index.vector_stores import DeepLakeVectorStore
 
6
  from langchain.chat_models import ChatOpenAI
7
  import chainlit as cl
8
  os.environ["OPENAI_API_KEY"] = 'REDACTED-LEAKED-KEY'  # SECURITY(review): a real OpenAI secret key was committed here in plain text — revoke it immediately and load the key from the environment or a secrets manager, never hard-code it
 
26
 
27
 
28
 
29
+ #dataset_path ="hub://cxcxxaaaaaz/text_embedding" # if we comment this out and don't pass the path then GPTDeepLakeIndex will create dataset in memory
30
  from llama_index.storage.storage_context import StorageContext
31
  from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document
32
  from llama_index.vector_stores import ChromaVectorStore
 
68
  )
69
 
70
  prompt_helper = PromptHelper(
71
+ context_window=2000,
72
  num_output=256,
73
  chunk_overlap_ratio=0.1,
74
+ chunk_size_limit=200
75
  )
76
 
77
  import tiktoken
 
92
  from llama_index import set_global_service_context
93
 
94
 
 
 
95
 
96
+ index = VectorStoreIndex.from_documents([], vectorstore=vector_store, storage_context=storage_context, service_context=service_context)
97
 
98
 
99