axz91 committed on
Commit
c17b205
·
1 Parent(s): ff54e53

Add application file

Browse files
Files changed (1) hide show
  1. app.py +100 -0
app.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import chainlit as cl
import openai
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from llama_index import (
    Document,
    GPTVectorStoreIndex,
    LLMPredictor,
    OpenAIEmbedding,
    PromptHelper,
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    get_response_synthesizer,
    load_index_from_storage,
)
from llama_index.callbacks.base import CallbackManager
from llama_index.indices.vector_store import VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index.node_parser import SimpleNodeParser
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.text_splitter import TokenTextSplitter
from llama_index.vector_stores import MilvusVectorStore, SupabaseVectorStore
+
27
+
28
+
29
+
30
+ openai.api_key = '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'
31
+
32
+
33
+ # Substitute your connection string here
34
+ DB_CONNECTION = "postgresql://postgres:xnam*P3gMWL9Wgp@db.yfeyscypngqzvtartbca.supabase.co:5432/postgres"
35
+ vector_store = SupabaseVectorStore(
36
+ postgres_connection_string=DB_CONNECTION,
37
+ collection_name='p53',memory = 21096
38
+ )
39
+
40
+
41
+ # rebuild storage context
42
+ storage_context = StorageContext.from_defaults(vector_store=vector_store)
43
+
44
+ prompt_helper = PromptHelper(
45
+ context_window=4096,
46
+ num_output=256,
47
+ chunk_overlap_ratio=0.1,
48
+ chunk_size_limit=None
49
+ )
50
+
51
+ llm = OpenAI(model='gpt-3.5-turbo', temperature=0, max_tokens=256)
52
+ embed_model = OpenAIEmbedding()
53
+ node_parser = SimpleNodeParser(
54
+ text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=100)
55
+ )
56
+
57
+ service_context = ServiceContext.from_defaults(
58
+ llm=llm,
59
+ embed_model=embed_model,
60
+ node_parser=node_parser,
61
+ prompt_helper=prompt_helper
62
+ )
63
+
64
+
65
+ index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
66
+ from llama_index import get_response_synthesizer
67
+
68
+
69
+
70
+
71
+
72
+ @cl.on_chat_start
73
+ async def factory():
74
+
75
+
76
+ query_engine = index.as_query_engine(
77
+ service_context=service_context,
78
+ streaming=True,
79
+ )
80
+
81
+ cl.user_session.set("query_engine", query_engine)
82
+
83
+
84
+ @cl.on_message
85
+ async def main(message):
86
+
87
+
88
+ query_engine = cl.user_session.get("query_engine") # type: RetrieverQueryEngine
89
+ response = await cl.make_async(query_engine.query)(message)
90
+
91
+ response_message = cl.Message(content="")
92
+
93
+ for token in response.response_gen:
94
+ response_message.content = response.response_txt
95
+
96
+ if response.response_txt:
97
+ response_message.content = response.response_txt
98
+
99
+ await response_message.send()
100
+