yinong333 committed on
Commit
6672feb
·
1 Parent(s): 41ecb0a

Deploying ChatwithLegal

Browse files
Files changed (2) hide show
  1. app.py +8 -5
  2. chainlit.md +1 -1
app.py CHANGED
@@ -24,6 +24,7 @@ import chainlit as cl
24
  from langchain_qdrant import QdrantVectorStore
25
  from qdrant_client import QdrantClient
26
  from qdrant_client.http.models import Distance, VectorParams
 
27
 
28
  system_template = """Use the following pieces of context to answer the users question.
29
  If you don't know the answer, just say that you don't know, don't try to make up an answer.
@@ -43,13 +44,15 @@ messages = [
43
  prompt = ChatPromptTemplate.from_messages(messages)
44
  chain_type_kwargs = {"prompt": prompt}
45
 
46
-
47
  def generate_vdb(chunks):
48
- EMBEDDING_MODEL = "text-embedding-3-small"
49
- embeddings = OpenAIEmbeddings(model=EMBEDDING_MODEL)
 
50
  LOCATION = ":memory:"
51
  COLLECTION_NAME = "legal data"
52
- VECTOR_SIZE = 1536
 
53
 
54
  qdrant_client = QdrantClient(LOCATION)
55
 
@@ -112,7 +115,7 @@ async def on_chat_start():
112
  return_messages=True,
113
  )
114
 
115
- # Create a chain that uses the Chroma vector store
116
  chain = ConversationalRetrievalChain.from_llm(
117
  ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True),
118
  chain_type="stuff",
 
24
  from langchain_qdrant import QdrantVectorStore
25
  from qdrant_client import QdrantClient
26
  from qdrant_client.http.models import Distance, VectorParams
27
+ from langchain_huggingface import HuggingFaceEmbeddings
28
 
29
  system_template = """Use the following pieces of context to answer the users question.
30
  If you don't know the answer, just say that you don't know, don't try to make up an answer.
 
44
  prompt = ChatPromptTemplate.from_messages(messages)
45
  chain_type_kwargs = {"prompt": prompt}
46
 
47
+ huggingface_embeddings = HuggingFaceEmbeddings(model_name="yinong333/finetuned_MiniLM")
48
  def generate_vdb(chunks):
49
+ #EMBEDDING_MODEL = "text-embedding-3-small"
50
+ #embeddings = OpenAIEmbeddings(model=EMBEDDING_MODEL)
51
+ embeddings = huggingface_embeddings
52
  LOCATION = ":memory:"
53
  COLLECTION_NAME = "legal data"
54
+ #VECTOR_SIZE = 1536
55
+ VECTOR_SIZE = 384
56
 
57
  qdrant_client = QdrantClient(LOCATION)
58
 
 
115
  return_messages=True,
116
  )
117
 
118
+ # Create a chain that uses the Qdrant vector store
119
  chain = ConversationalRetrievalChain.from_llm(
120
  ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True),
121
  chain_type="stuff",
chainlit.md CHANGED
@@ -1,3 +1,3 @@
1
- # Chat with Legal PDF
2
 
3
  This Chainlit app was created following instructions from [this repository!](https://github.com/AI-Maker-Space/Beyond-ChatGPT)
 
1
+ # Chat with Legal PDF (Loading Files Now...)
2
 
3
  This Chainlit app was created following instructions from [this repository!](https://github.com/AI-Maker-Space/Beyond-ChatGPT)