Allen Ding committed
Commit b950f71 · 1 Parent(s): a76aafa
Files changed (2)
  1. Dockerfile +8 -4
  2. app.py +4 -2
Dockerfile CHANGED
@@ -1,11 +1,15 @@
  FROM python:3.9
+ #FROM python:3.9 AS builder
  RUN useradd -m -u 1000 user
  USER user
  ENV HOME=/home/user \
      PATH=/home/user/.local/bin:$PATH
  WORKDIR $HOME/app
- COPY --chown=user . $HOME/app
- COPY ./requirements.txt ~/app/requirements.txt
+ # COPY --chown=user . $HOME/app
+ COPY ./requirements.txt $HOME/app/requirements.txt
  RUN pip install -r requirements.txt
- COPY . .
- CMD ["chainlit", "run", "app.py", "--port", "7860"]
+
+ #FROM python:3.9
+ #COPY --from=builder /usr/local/lib/python3.9/site-packages /usr/local/lib/python3.9/site-packages
+ COPY --chown=user . $HOME/app
+ CMD ["chainlit", "run", "app.py", "--port", "7860"]
app.py CHANGED
@@ -39,13 +39,15 @@ class RetrievalAugmentedQAPipeline:
              # "response" : the "context" and "question" values are used to format our prompt object and then piped
              # into the LLM and stored in a key called "response"
              # "context" : populated by getting the value of the "context" key from the previous step
-             | {"response": base_rag_prompt | base_llm, "context": itemgetter("context")}
+             | {"response": base_rag_prompt | base_llm }
          )

      async def arun_pipeline(self, user_query: str):
          async def generate_response():
+             # yield self.retrieval_augmented_qa_chain.invoke({"question": user_query})["response"].content
+
              async for chunk in self.retrieval_augmented_qa_chain.astream({"question": user_query}):
-                 yield chunk
+                 yield chunk["response"].content

          return {"response": generate_response()}
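
For context, below is a minimal sketch of how the reworked streaming output could be consumed from a Chainlit handler. It is not part of this commit: the handler, the session-stored pipeline, and the names used are assumptions, and only arun_pipeline's return shape (a dict whose "response" key is an async generator of strings) is taken from the diff above.

# Hypothetical Chainlit consumer of arun_pipeline (assumed, not from this commit).
import chainlit as cl

@cl.on_message
async def main(message: cl.Message):
    # Assumes an @cl.on_chat_start handler stored a RetrievalAugmentedQAPipeline
    # instance in the user session under the key "pipeline".
    pipeline = cl.user_session.get("pipeline")

    msg = cl.Message(content="")
    result = await pipeline.arun_pipeline(message.content)

    # result["response"] is the async generator from generate_response(); after this
    # commit each item is already a string (chunk["response"].content), so it can be
    # streamed token-by-token into the Chainlit message.
    async for token in result["response"]:
        await msg.stream_token(token)

    await msg.send()

Because the diff now yields chunk["response"].content instead of the raw chunk dict, the consumer can stream plain strings directly, which is what Chainlit's stream_token expects.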