YoniFriedman committed on
Commit
ab91d64
·
verified ·
1 Parent(s): 0f79f77

Update to match deployed version

Browse files
Files changed (1) hide show
  1. app.py +53 -41
app.py CHANGED
@@ -1,64 +1,76 @@
 
 
 
 
1
  from llama_index.llms.openai import OpenAI
2
  from llama_index.core.schema import MetadataMode
3
- from llama_index.core import VectorStoreIndex
4
- from llama_index.core import StorageContext
5
- from llama_index.core import load_index_from_storage
6
- import os
7
  import openai
8
- os.environ["OPENAI_API_KEY"]
9
  import logging
10
  import sys
11
- llm = OpenAI(temperature=0.0, model="gpt-3.5-turbo", max_tokens=512)
 
 
 
 
 
 
12
 
13
- PERSIST_DIR = "arv_metadata"
14
- storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
 
 
 
15
  index = load_index_from_storage(storage_context)
16
  query_engine = index.as_query_engine(similarity_top_k=3, llm=llm)
17
 
 
18
 
19
- preamble = (" The person asking the following prompt is a person living with HIV in Kenya."
20
- " For every response, recognize that they already have HIV and do not suggest that they have to get tested"
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  " for HIV or take post-exposure prophylaxis, as that is not relevant, though their partners perhaps should."
22
  " Do not suggest anything that is not relevant to someone who already has HIV."
23
- " They are asking questions through a mobile application called Nishauri"
24
- " through which they can see their lab results, appointment histories, and upcoming appointments."
25
- " Questions about those topics should be answered based on Nishauri user guide."
26
- " Here is some information that is authoritative and should guide responses, when relevant."
27
- " For questions about viral load, be sure to provide specific information"
28
- " about cutoffs for viral load categories. Under 50 copies/ml is low detectable level,"
29
  " 50 - 199 copies/ml is low level viremia, 200 - 999 is high level viremia, and "
30
  " 1000 and above is suspected treatment failure."
31
  " A high viral load or non-suppressed viral load is any viral load above 200 copies/ml."
32
- " A suppressed viral load is one below 200 copies / ml."
33
- " An established client is one who is on their current ART regimen for a period greater"
34
- " than 6 months, had no active OI or in the previous 6 months, has adhered to scheduled"
35
- " clinic visits for the previous 6 months and Viral load results has been less than 200 copies/ml"
36
- " within the last 6 months."
37
- " For questions about when patients should get their viral loads taken,"
38
- " if they are newly initiated on ART, the first viral load sample should be taken after 3 months of"
39
- " taking ART. Otherwise, if they are not new on ART, then if their previous result was below 50 to 199 cp/ml,"
40
- " their viral load should be taken after every 12 months. If their previous result was above 200cp/ml,"
41
- " then viral load sample should be taken after three months."
42
- " Please answer the prompt using the information retrieved"
43
- " and do not rely at all on your prior knowledge."
44
- # " Please keep your reply to no longer than three sentences, and please use simple language. ")
45
- )
46
 
47
- prompt_intro = (" Here is the prompt: ")
 
 
 
 
 
48
 
49
- import gradio as gr
 
 
 
 
 
50
 
51
- def nishauri(question: str, conversation_history: list[str]):
52
 
 
 
53
 
54
- context = " ".join([item["user"] + " " + item["chatbot"] for item in conversation_history])
55
- response = query_engine.query(preamble +
56
- "the user previously asked and received the following: " +
57
- context +
58
- prompt_intro +
59
- question)
60
-
61
- conversation_history.append({"user": question, "chatbot": response.response})
62
 
63
  source1 = ("File Name: " +
64
  response.source_nodes[0].metadata["file_name"] +
 
1
+ 3.25 kB
2
+ import os
3
+ os.environ["OPENAI_API_KEY"]
4
+
5
  from llama_index.llms.openai import OpenAI
6
  from llama_index.core.schema import MetadataMode
 
 
 
 
7
  import openai
8
+ from openai import OpenAI as OpenAIOG
9
  import logging
10
  import sys
11
+ llm = OpenAI(temperature=0.0, model="gpt-3.5-turbo")
12
+ client = OpenAIOG()
13
+
14
+ from langdetect import detect
15
+ from langdetect import DetectorFactory
16
+ DetectorFactory.seed = 0
17
+ from deep_translator import GoogleTranslator
18
 
19
+ # Load index
20
+ from llama_index.core import VectorStoreIndex
21
+ from llama_index.core import StorageContext
22
+ from llama_index.core import load_index_from_storage
23
+ storage_context = StorageContext.from_defaults(persist_dir="arv_metadata")
24
  index = load_index_from_storage(storage_context)
25
  query_engine = index.as_query_engine(similarity_top_k=3, llm=llm)
26
 
27
+ import gradio as gr
28
 
29
+ def nishauri(question: str, conversation_history: list[str]):
30
+
31
+
32
+ context = " ".join([item["user"] + " " + item["chatbot"] for item in conversation_history])
33
+
34
+ lang_question = detect(question)
35
+
36
+ if lang_question=="sw":
37
+ question = GoogleTranslator(source='sw', target='en').translate(question)
38
+
39
+ response = query_engine.query(question)
40
+
41
+ background = ("The person who asked the question is a person living with HIV."
42
+ " If the person says sasa or niaje, that is swahili slang for hello."
43
+ " Recognize that they already have HIV and do not suggest that they have to get tested"
44
  " for HIV or take post-exposure prophylaxis, as that is not relevant, though their partners perhaps should."
45
  " Do not suggest anything that is not relevant to someone who already has HIV."
46
+ " Do not mention in the response that the person is living with HIV."
47
+ " The following information about viral loads is authoritative for any question about viral loads:"
48
+ " Under 50 copies/ml is low detectable level,"
 
 
 
49
  " 50 - 199 copies/ml is low level viremia, 200 - 999 is high level viremia, and "
50
  " 1000 and above is suspected treatment failure."
51
  " A high viral load or non-suppressed viral load is any viral load above 200 copies/ml."
52
+ " A suppressed viral load is one below 200 copies / ml.")
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
+ question_final = (
55
+ f"The user previously asked and answered the following: {context}"
56
+ f" The user just asked the following question: {question}"
57
+ f" The following response was generated in response: {response}"
58
+ f" Please update the response provided only if needed, based on the following background information {background}"
59
+ )
60
 
61
+ completion = client.chat.completions.create(
62
+ model="gpt-3.5-turbo",
63
+ messages=[
64
+ {"role": "user", "content": question_final}
65
+ ]
66
+ )
67
 
68
+ reply_to_user = completion.choices[0].message.content
69
 
70
+ if lang_question=="sw":
71
+ reply_to_user = GoogleTranslator(source='auto', target='sw').translate(reply_to_user)
72
 
73
+ conversation_history.append({"user": question, "chatbot": response.response})
 
 
 
 
 
 
 
74
 
75
  source1 = ("File Name: " +
76
  response.source_nodes[0].metadata["file_name"] +