Jonathan Card committed on
Commit
0bd536c
·
1 Parent(s): edd2137

Working answer query

Browse files
Files changed (3) hide show
  1. llm_service.py +13 -7
  2. requirements.txt +2 -1
  3. test/test_llm_service.py +22 -3
llm_service.py CHANGED
@@ -3,6 +3,7 @@ import chromadb
3
  from chromadb.config import Settings
4
  from sentence_transformers import SentenceTransformer
5
  import pandas as pd
 
6
 
7
  class LLMService(object):
8
  def __init__(self):
@@ -17,7 +18,8 @@ class LLMService(object):
17
  llm_service = DefaultLLMService(self._openAIKey)
18
  yield llm_service
19
  finally:
20
- llm_service.close()
 
21
 
22
  def close(self):
23
  raise Exception("Should not use the base class")
@@ -41,14 +43,16 @@ class DefaultLLMService(LLMService):
41
  self.build_chromadb()
42
 
43
  def close(self):
44
- raise Exception("Not implemented")
 
45
 
46
  def get_chromadb(self,clear=0):
47
  client=chromadb.Client(Settings(
48
  persist_directory="./chroma_db"
49
  ))
50
- if clear:
51
- client.delete_collection(collection_name)
 
52
  return client.get_or_create_collection(name=collection_name)
53
 
54
  def build_chromadb(self):
@@ -62,7 +66,7 @@ class DefaultLLMService(LLMService):
62
  collection.add(
63
  documents=texts,
64
  embeddings=embeddings,
65
- metadatas=[df.iloc[i,3:11].to_dict() for i in range(len(texts))], #store everything except clinical notes and ids as metadata
66
  ids=[str(i) for i in range(len(texts))]
67
  )
68
 
@@ -115,7 +119,7 @@ class DefaultLLMService(LLMService):
115
  # Has this patient ...?
116
  # Does this patient have a history of ...?
117
  # TODO: Find in vector database the most related docs to both 1. patient & 2. query
118
- rag=self.query_chromadb(patient,query)
119
  # TODO: Figure out how to utilize other columns.
120
  prompt_template="""
121
  You are an AI Assistant answering questions about a patient based on the relevant patient information provided.\n
@@ -134,4 +138,6 @@ class DefaultLLMService(LLMService):
134
  messages=[{"role": "user", "content": filled_prompt}],
135
  temperature=0
136
  )
137
- return response
 
 
 
3
  from chromadb.config import Settings
4
  from sentence_transformers import SentenceTransformer
5
  import pandas as pd
6
+ from openai import OpenAI
7
 
8
  class LLMService(object):
9
  def __init__(self):
 
18
  llm_service = DefaultLLMService(self._openAIKey)
19
  yield llm_service
20
  finally:
21
+ if llm_service is not None:
22
+ llm_service.close()
23
 
24
  def close(self):
25
  raise Exception("Should not use the base class")
 
43
  self.build_chromadb()
44
 
45
  def close(self):
46
+ #raise Exception("Not implemented")
47
+ pass
48
 
49
  def get_chromadb(self,clear=0):
50
  client=chromadb.Client(Settings(
51
  persist_directory="./chroma_db"
52
  ))
53
+ collection_name = "patient_data"
54
+ #if clear:
55
+ # client.delete_collection(collection_name)
56
  return client.get_or_create_collection(name=collection_name)
57
 
58
  def build_chromadb(self):
 
66
  collection.add(
67
  documents=texts,
68
  embeddings=embeddings,
69
+ metadatas=[self._df.iloc[i,3:11].to_dict() for i in range(len(texts))], #store everything except clinical notes and ids as metadata
70
  ids=[str(i) for i in range(len(texts))]
71
  )
72
 
 
119
  # Has this patient ...?
120
  # Does this patient have a history of ...?
121
  # TODO: Find in vector database the most related docs to both 1. patient & 2. query
122
+ rag=self.query_chromadb(patient,query, "")
123
  # TODO: Figure out how to utilize other columns.
124
  prompt_template="""
125
  You are an AI Assistant answering questions about a patient based on the relevant patient information provided.\n
 
138
  messages=[{"role": "user", "content": filled_prompt}],
139
  temperature=0
140
  )
141
+ # TODO: Error handling for 0 choices
142
+ print(response)
143
+ return response.choices[0].message.content
requirements.txt CHANGED
@@ -4,4 +4,5 @@ openai
4
  python-dotenv
5
  langchain_openai
6
  chromadb
7
- sentence-transformers
 
 
4
  python-dotenv
5
  langchain_openai
6
  chromadb
7
+ sentence-transformers
8
+ numpy<2
test/test_llm_service.py CHANGED
@@ -1,5 +1,6 @@
1
  import unittest
2
  from llm_service import LLMService
 
3
 
4
  class TestLlmService(unittest.TestCase):
5
  def test_initialize(self):
@@ -22,8 +23,26 @@ class TestLlmService(unittest.TestCase):
22
  except Exception as e:
23
  pass
24
 
25
- def test_get_summary(self):
26
  """
27
- Tests the generation of the patient summary.
28
  """
29
- self.fail("Test not implemented yet")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import unittest
2
  from llm_service import LLMService
3
+ import os
4
 
5
  class TestLlmService(unittest.TestCase):
6
  def test_initialize(self):
 
23
  except Exception as e:
24
  pass
25
 
26
+ def test_simple_query(self):
27
  """
28
+ Tests querying the chroma database.
29
  """
30
+ with LLMService().with_key(os.getenv("OPENAI_API_KEY")).build() as llm_service:
31
+ result = llm_service.query_chromadb(0, "tell me about dental visits", "")
32
+ print(result)
33
+
34
+ def test_simple_summary(self):
35
+ """
36
+ Tests the patient summary
37
+ """
38
+ with LLMService().with_key(os.getenv("OPENAI_API_KEY")).build() as llm_service:
39
+ result = llm_service.get_summary(0)
40
+ print(result)
41
+
42
+ def test_simple_answer_query(self):
43
+ """
44
+ Tests a simple query
45
+ """
46
+ with LLMService().with_key(os.getenv("OPENAI_API_KEY")).build() as llm_service:
47
+ result = llm_service.answer_query(0, "tell me about dental visits")
48
+ print(result)