Roland Ding committed on
Commit
ef80389
·
1 Parent(s): 5f0eb5f

5.4.15.42 Combined chatgpt-turbo-16k and the search-term prompt identification feature. Additionally, aligned with the cloud data structure for separation of fields and prompts, added an AI formatting instruction, and simplified the UI.

Browse files
Files changed (7) hide show
  1. app.py +4 -3
  2. application.py +6 -6
  3. cloud_storage.py +3 -3
  4. features.py +105 -147
  5. supplier.py +64 -5
  6. ui_study.py +21 -59
  7. utility.py +0 -2
app.py CHANGED
@@ -18,9 +18,10 @@ examples = []
18
  # app_theme = gr.themes.Base(neutral_hue="blue")
19
 
20
  demo = gr.TabbedInterface(
21
- [device_page,study_page,equivalent_page],
22
- ["Device","Clinical Study Report","Equivalent Comparators"],
23
- # theme= app_theme,
 
24
  theme = gr.themes.Soft(primary_hue="sky",secondary_hue="orange"),
25
  css = "footer {visibility: hidden}",
26
  title="AMRA AI Medi Reader")
 
18
  # app_theme = gr.themes.Base(neutral_hue="blue")
19
 
20
  demo = gr.TabbedInterface(
21
+ # [device_page,study_page,equivalent_page],
22
+ # ["Device","Clinical Study Report","Equivalent Comparators"],
23
+ [study_page],
24
+ ["Clinical Study"],
25
  theme = gr.themes.Soft(primary_hue="sky",secondary_hue="orange"),
26
  css = "footer {visibility: hidden}",
27
  title="AMRA AI Medi Reader")
application.py CHANGED
@@ -57,7 +57,8 @@ data_structure = {
57
  "term",
58
  "clinical term",
59
  "summary term",
60
- "template_name"
 
61
  ]},
62
  "prompts":{
63
  "key":[
@@ -72,6 +73,8 @@ data_structure = {
72
  "levels",
73
  "preoperatives",
74
  "prompt",
 
 
75
  ]
76
  },
77
  "articles":{
@@ -92,6 +95,7 @@ data_structure = {
92
  "key":[
93
  "domain",
94
  "article",
 
95
  ],
96
  "fields":[
97
  "domain",
@@ -105,12 +109,8 @@ data_structure = {
105
  application default data
106
  '''
107
  app_data = {
108
- "articles":[],
109
  "terms":[],
110
  "prompts":[],
111
  "outputs":[]
112
  }
113
-
114
-
115
- # hypothesis:
116
- # normal abstract length is about 800 characters
 
57
  "term",
58
  "clinical term",
59
  "summary term",
60
+ "template_name",
61
+ "terms"
62
  ]},
63
  "prompts":{
64
  "key":[
 
73
  "levels",
74
  "preoperatives",
75
  "prompt",
76
+ "fields",
77
+ "reformat_inst"
78
  ]
79
  },
80
  "articles":{
 
95
  "key":[
96
  "domain",
97
  "article",
98
+ "outcomes"
99
  ],
100
  "fields":[
101
  "domain",
 
109
  application default data
110
  '''
111
  app_data = {
112
+ "current_article":{},
113
  "terms":[],
114
  "prompts":[],
115
  "outputs":[]
116
  }
 
 
 
 
cloud_storage.py CHANGED
@@ -48,11 +48,11 @@ def upload_fileobj(file_obj, bucket, object_name=None):
48
  object_name = file_obj.name
49
 
50
  try:
51
- s3.upload_fileobj(file_obj, bucket, object_name)
52
  except Exception as e:
53
  print(e)
54
- return False
55
- return True
56
 
57
  # get a file from s3
58
  def download_file(bucket, object_name, file_name=None):
 
48
  object_name = file_obj.name
49
 
50
  try:
51
+ res = s3.upload_fileobj(file_obj, bucket, object_name)
52
  except Exception as e:
53
  print(e)
54
+ return e
55
+ return res
56
 
57
  # get a file from s3
58
  def download_file(bucket, object_name, file_name=None):
features.py CHANGED
@@ -1,8 +1,5 @@
1
  # language default packages
2
  from datetime import datetime
3
- from operator import mul
4
- from functools import reduce
5
- from sys import stdout
6
  from collections import defaultdict
7
 
8
  # external packages
@@ -29,10 +26,6 @@ def init_app_data():
29
  def process_study(
30
  study_file_obj,
31
  study_content,
32
- performance_metric_1,
33
- performance_metric_2,
34
- safety_metric_1,
35
- safety_metric_2,
36
  device=default_device
37
  ):
38
 
@@ -43,89 +36,69 @@ def process_study(
43
  else:
44
  return "No file or content provided","No file or content provided","No file or content provided"
45
 
46
- prompts = select_prompts( # need to identify how the app will know which prompts to use
47
- article,
48
- performance_metric_1,
49
- performance_metric_2,
50
- safety_metric_1,
51
- safety_metric_2
52
-
53
- )
54
- # print("check prompts",prompts)
55
 
56
  output = {
57
  "domain":article["domain"],
58
  "article":article["name"],
59
- "output":defaultdict(dict)
60
  }
61
 
62
- for p in prompts:
63
- prompt_string = ""
64
- for s in p["sections"].split(","):
65
- prompt_string += f"{article[s]}"
66
-
67
- prompt_string += f"\n {p['prompt']}"
68
- with open(f".prompts/{article['name']}_{p['template_name']}.txt","w") as f:
69
- f.write(prompt_string)
70
-
71
- res = execute_prompt(prompt_string)
72
-
73
- with open(f".outputs/{article['name']}_{p['template_name']}.txt","w") as f:
74
- f.write(res)
75
-
76
- output["output"][p["assessment_step"]][p["template_name"]]=res
77
-
78
-
79
 
80
- overview = create_overview(output["output"]["overview"])
81
- details = create_details(output["output"])
82
 
83
  add_output(output)
84
 
85
- return overview, details
 
86
 
87
  def refresh():
88
  '''
89
  this function refresh the application data from the cloud backend
90
  '''
91
  init_app_data()
92
- return "refreshed", "refreshed"
93
 
94
- def create_overview(overview_list):
95
- '''
96
- '''
97
- md_text = "## Overview\n\n"
98
- md_text += "| attributes | detail |\n|:---|:---|\n"
99
- for _,v in overview_list.items():
100
- r = v.replace("\n\n","")
101
- rows = r.split("\n")
102
- for r in rows:
103
- c = r.replace(": "," | ")
104
- md_text += f"| {c} |\n"
105
- # with open("overview.md","w") as f:
106
- # f.write(md_text)
107
- return gr.update(value=md_text)
108
 
109
- def create_details(output):
110
- sections = ["clinical", "radiographic", "fussion assessment", "other","safety"]
111
- titles = ["Clinical Outcomes", "Radiological Outcomes", "Fussion Assessment", "Other Outcomes","Safety Outcomes"]
 
 
 
112
 
 
113
  md_text = ""
114
- for section, title in zip(sections, titles):
115
- md_text += f"## {title}\n\n"
116
-
117
- for key,table in output[section].items():
118
- md_text += f"### {key} \n\n"
119
- rows = table.replace("\n\n","").split("\n")
120
- for i,r in enumerate(rows):
121
- cells = r.split("\t")
122
- md_text += f"| {' | '.join(cells)} |\n"
123
- if i == 0:
124
- md_text += "| --- "*len(cells)+"|\n"
125
-
126
- md_text += "\n\n"
127
- # with open("details.md","w") as f:
128
- # f.write(md_text)
 
 
 
 
129
  return gr.update(value=md_text)
130
 
131
 
@@ -239,29 +212,22 @@ def add_article(domain,file,add_to_s3=True, add_to_local=True, file_object=True)
239
  '''
240
  if file_object:
241
  content, _ = read_pdf(file)
242
- name = file.name.split("\\")[-1].split(".")[0]
 
243
  else:
244
  content = file
245
- name = f"temp_{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
246
-
247
- abstract,_,end_abstract = extract_key_content(content,["objective","abstract"],["key","words:","methods"],["introduction"])
248
- methods,_,end_methods = extract_key_content(content[end_abstract:],["methods"],["results"])
249
- if not methods:
250
- methods,_,end_methods = extract_key_content(content[end_abstract:],["methods"],["discussion"])
251
- results,_,_ = extract_key_content(content[end_methods:],["results"],["discussion"])
252
 
253
  article ={
254
  "domain":domain,
255
- "name":name,
256
  "content":content,
257
- "abstract":abstract,
258
- "methods":methods,
259
- "results":results,
260
  "upload_time":datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
261
  }
262
 
263
  if add_to_s3 and file_object:
264
- s3_path = upload_fileobj(file,domain,article["name"])
265
  article["s3_path"] = s3_path
266
 
267
  if add_to_local:
@@ -269,7 +235,7 @@ def add_article(domain,file,add_to_s3=True, add_to_local=True, file_object=True)
269
 
270
  res = post_item("articles",article)
271
  if "Error" in res:
272
- print(res)
273
  return res
274
 
275
  return article
@@ -387,72 +353,31 @@ def remove_device():
387
  def update_device():
388
  pass
389
 
390
- def process_feedback(text):
391
- return text
 
 
 
 
 
392
 
393
- def select_prompts(article,*args):
394
- '''
395
- select the prompts based on the content and the search terms
396
- that was included in the content
397
 
398
- Parameters
399
- ----------
400
- article : dict
401
- article object
 
402
 
403
- Returns
404
- -------
405
- dict
406
- prompts
407
- '''
408
-
409
- # get template names based on the search terms
410
- memory = set()
411
- prompts = []
412
- for t in app_data["terms"]:
413
- t["terms"] = t["term"].split(",")
414
- if reduce(mul, [s in article["content"] for s in t["terms"]], 1) and t["template_name"] not in memory:
415
- # get prompts based from templates
416
- template_names = t["template_name"].split(",")
417
- for tn in template_names:
418
- prompts.extend([p for p in app_data["prompts"] if p["template_name"]==tn])
419
- if prompts:
420
- prompts[-1]["prompt"].replace("<--clinical term-->",t["clinical term"])
421
- prompts[-1]["prompt"].replace("<--radiologic term-->",t["clinical term"])
422
- prompts[-1]["prompt"].replace("<--other term-->",t["clinical term"])
423
-
424
- memory.add(t["template_name"])
425
-
426
- # add overview prompts
427
- prompts.extend([ov for ov in app_data["prompts"] if ov["assessment_step"]=="overview"])
428
- # print("number of prompts",len(prompts))
429
-
430
- # check if groups, levels and preopratives are in the article
431
- article_logic = {}
432
- for k,value in logic_keywords.items():
433
- article_logic[k] = bool(sum([kw in article["content"] for kw in value]))
434
- # print(article_logic)
435
-
436
- # use article_logic to filter prompts
437
- prompts = [p for p in prompts
438
- if (p["groups"] == article_logic["groups"] or p["groups"] is None)
439
- and (p["levels"] == article_logic["levels"] or p["levels"] is None)
440
- and (p["preoperatives"] == article_logic["preoperatives"] or p["preoperatives"] is None)]
441
 
442
-
443
- # print("number of prompts after logic",len(prompts))
444
- # early return if no specific result
445
- if "".join(args) == "":
446
- # print("no args")
447
- return prompts
448
-
449
- # # performance metrics and safety metrics filter
450
- # for p in prompts:
451
- # if not sum([a in p["clinical term"] for a in args if a]):
452
- # print(p["template_name"])
453
- # prompts.remove(p)
454
- # print("number of prompts after args",len(prompts))
455
- return prompts
456
 
457
  def keyword_search(keywords,full_text):
458
  keywords_result = {}
@@ -461,4 +386,37 @@ def keyword_search(keywords,full_text):
461
  keywords_result[k]=list_or([keyword_search(kw,full_text) for kw in k])
462
  else:
463
  keywords_result[k]=keyword_search(k,full_text)
464
- return keywords_result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # language default packages
2
  from datetime import datetime
 
 
 
3
  from collections import defaultdict
4
 
5
  # external packages
 
26
  def process_study(
27
  study_file_obj,
28
  study_content,
 
 
 
 
29
  device=default_device
30
  ):
31
 
 
36
  else:
37
  return "No file or content provided","No file or content provided","No file or content provided"
38
 
39
+ app_data["current_article"] = article
40
+ selected_prompts = select_prompts(article["content"],terms=app_data["terms"],prompts=app_data["prompts"])
 
 
 
 
 
 
 
41
 
42
  output = {
43
  "domain":article["domain"],
44
  "article":article["name"],
45
+ "outcomes":defaultdict(str)
46
  }
47
 
48
+ res = process_prompts(article["content"],selected_prompts)
49
+ output["outcomes"] = res
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
+ # overview = create_overview(output["output"]["Overview"])
52
+ views = create_views(res)
53
 
54
  add_output(output)
55
 
56
+ return views
57
+ # return ""
58
 
59
  def refresh():
60
  '''
61
  this function refresh the application data from the cloud backend
62
  '''
63
  init_app_data()
 
64
 
65
+ article = app_data["current_article"]
66
+ selected_prompts = select_prompts(article["content"],terms=app_data["terms"],prompts=app_data["prompts"])
67
+
68
+ output = {
69
+ "domain":article["domain"],
70
+ "article":article["name"],
71
+ "outcomes":defaultdict(str)
72
+ }
 
 
 
 
 
 
73
 
74
+ res = process_prompts(article["content"],selected_prompts)
75
+ output["outcomes"] = res
76
+ views = create_views(res)
77
+ add_output(output)
78
+
79
+ return views
80
 
81
+ def create_views(output):
82
  md_text = ""
83
+
84
+ overview = [v for _,v in output.items() if v["assessment"] == "overview"][0]
85
+ safety = [v for _,v in output.items() if v["assessment"] == "safety"][0]
86
+ # add overview
87
+ md_text += f"<details>\n<summary>Overivew</summary>\n\n"
88
+ md_text += overview["content"] + "\n</details>\n\n"
89
+
90
+ # add performance
91
+ md_text += f"<details>\n<summary>Performance</summary>\n\n"
92
+ for title,content in output.items():
93
+ if content["assessment"] not in ["overview","safety"]:
94
+ md_text += f"#### {content['assessment']} - {title}\n\n"
95
+ md_text += content["content"] + "\n\n"
96
+ md_text += "</details>\n\n"
97
+
98
+ # add safety
99
+ md_text += f"<details>\n<summary>Safety</summary>\n\n"
100
+ md_text += safety["content"] + "\n\n" + "</details>\n\n"
101
+
102
  return gr.update(value=md_text)
103
 
104
 
 
212
  '''
213
  if file_object:
214
  content, _ = read_pdf(file)
215
+ filename = file.name.split("\\")[-1]
216
+ # name = filename.split(".")[0]
217
  else:
218
  content = file
219
+ # filename = file.name
220
+ filename = f"temp_{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
 
 
 
 
 
221
 
222
  article ={
223
  "domain":domain,
224
+ "name":filename,
225
  "content":content,
 
 
 
226
  "upload_time":datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
227
  }
228
 
229
  if add_to_s3 and file_object:
230
+ s3_path = upload_fileobj(file,domain,filename)
231
  article["s3_path"] = s3_path
232
 
233
  if add_to_local:
 
235
 
236
  res = post_item("articles",article)
237
  if "Error" in res:
238
+ print(res["Error"])
239
  return res
240
 
241
  return article
 
353
  def update_device():
354
  pass
355
 
356
+ # identify article state
357
+ def identify_logic(text):
358
+ article_logic = [
359
+ "groups",
360
+ "levels",
361
+ "preoperatives"
362
+ ]
363
 
364
+ return {l:l in text.lower() for l in article_logic}
 
 
 
365
 
366
+ def select_prompts(text,terms,prompts):
367
+ selected_templates = set()
368
+ for t in terms:
369
+ if all([term in text for term in t["terms"]]):
370
+ selected_templates.update(t["template_name"])
371
 
372
+ logic = identify_logic(text)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
373
 
374
+ selected_prompts = [p for p in prompts if p["template_name"] in selected_templates]
375
+ overview_prompts = [p for p in prompts if p["assessment_step"] == "overview"]
376
+ for p in overview_prompts:
377
+ if all([p[l]==v for l,v in logic.items() if v]):
378
+ selected_prompts.append(p)
379
+
380
+ return selected_prompts
 
 
 
 
 
 
 
381
 
382
  def keyword_search(keywords,full_text):
383
  keywords_result = {}
 
386
  keywords_result[k]=list_or([keyword_search(kw,full_text) for kw in k])
387
  else:
388
  keywords_result[k]=keyword_search(k,full_text)
389
+ return keywords_result
390
+
391
+ def process_prompts(text,prompts):
392
+ '''
393
+ process_prompts function receive the text and prompts and return the instruction stream
394
+
395
+ Parameters
396
+ ----------
397
+ text : str
398
+ text of the article
399
+ prompts : list
400
+ list of prompts
401
+
402
+ Returns
403
+ -------
404
+ dict
405
+ processed extraction results from openai api
406
+ '''
407
+ res = defaultdict(dict)
408
+ for p in prompts:
409
+ inst = [
410
+ p["prompt"]+", ".join(p["fields"]),
411
+ p["reformat_inst"]
412
+ ]
413
+ inst_stream = create_inst(text,inst)
414
+ extraction = send_inst(inst_stream)
415
+
416
+ res[p["template_name"]] = {
417
+ "template_name":p["template_name"],
418
+ "assessment":p["assessment_step"],
419
+ "content":extraction
420
+ }
421
+
422
+ return res
supplier.py CHANGED
@@ -1,11 +1,6 @@
1
  import openai
2
  from application import *
3
 
4
- # import json
5
- #
6
- # with open("openai_api_key.json", "r") as f:
7
- # openai_api_key = json.load(f)["key"]
8
-
9
  openai.api_key = openai_api_key
10
 
11
  def execute_prompt(prompt):
@@ -28,3 +23,67 @@ def execute_prompt(prompt):
28
  presence_penalty=0
29
  )
30
  return res.choices[0]["text"] if res.choices else "<error> failed to generate text</error>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import openai
2
  from application import *
3
 
 
 
 
 
 
4
  openai.api_key = openai_api_key
5
 
6
  def execute_prompt(prompt):
 
23
  presence_penalty=0
24
  )
25
  return res.choices[0]["text"] if res.choices else "<error> failed to generate text</error>"
26
+
27
+ def format(**kwargs):
28
+ if "format" in kwargs:
29
+ return kwargs["format"]
30
+ return kwargs
31
+
32
+
33
+ def execute_instruction(article, instruction,model="gpt-3.5-turbo-16k",format="markdown"):
34
+ '''
35
+ execute_instruction function takes three arguments: article, instruction and model
36
+
37
+ article: the raw text from the article source
38
+ instruction: the instruction for the rational execution it needs to complete
39
+ model: the model used for the rational execution, default to gpt-3.5-turbo-16k
40
+ format: the format of the table, default to markdown
41
+
42
+ return: a string, the result of the rational execution
43
+ '''
44
+ msg_stream = [
45
+ {
46
+ "role":"system",
47
+ "content":article
48
+ }
49
+ ]
50
+
51
+ msg_stream.append({
52
+ "role":"user",
53
+ "content":instruction
54
+ })
55
+
56
+ msg_stream.append({
57
+ "role":"user",
58
+ "content":f"Format the table in {format} syntax"
59
+ })
60
+
61
+ res= openai.ChatCompletion.create(
62
+ model=model,
63
+ messages=msg_stream,
64
+ temperature=0)
65
+
66
+ return res["choices"][0]["message"]["content"]
67
+
68
+ def create_inst(article, instructions):
69
+ msg_stream = [
70
+ {
71
+ "role":"system",
72
+ "content":article
73
+ }
74
+ ]
75
+ for i in instructions:
76
+ msg_stream.append({
77
+ "role":"user",
78
+ "content":i
79
+ })
80
+
81
+ return msg_stream
82
+
83
+ def send_inst(stream, model="gpt-3.5-turbo-16k",temperature=0):
84
+ res= openai.ChatCompletion.create(
85
+ model=model,
86
+ messages=stream,
87
+ temperature=temperature)
88
+
89
+ return res["choices"][0]["message"]["content"]
ui_study.py CHANGED
@@ -1,22 +1,16 @@
1
  import gradio as gr
2
 
3
- # from article import *
4
  from utility import *
5
  from application import *
6
  from features import *
7
 
8
  def reset():
 
 
 
9
  return (
10
  gr.Files.update(value=None),
11
  gr.TextArea.update(value=""),
12
- gr.Textbox.update(value=""),
13
- gr.Textbox.update(value=""),
14
- gr.Textbox.update(value=""),
15
- gr.Textbox.update(value=""),
16
- gr.Checkbox.update(value=False),
17
- gr.Slider.update(value=0),
18
- gr.Markdown.update(value=""),
19
- gr.Markdown.update(value=""),
20
  gr.Markdown.update(value="")
21
  )
22
 
@@ -25,76 +19,44 @@ with gr.Blocks() as study_page:
25
  with gr.Column():
26
  gr.Markdown("## Studies")
27
  gr.HTML("<hr>")
28
-
29
  upload_study = gr.File(label="Upload a clinical study report",type="file")
 
30
  input_study = gr.TextArea(label="Or paste a clinical study report content",placeholder="Paste content here...",lines=5)
31
 
32
  with gr.Row():
33
  btn_reset = gr.Button(value="Reset",variant="stop")
34
  btn_add_study = gr.Button(value="Add",variant="primary")
35
- with gr.Column():
36
 
37
- performance_metric_1 = gr.Textbox(lines=1, label="identify Key Performance Outcome 1",placeholder="e.g. VAS Score")
38
- performance_metric_2 = gr.Textbox(lines=1, label="identify Key Performance Outcome 2",placeholder="e.g. Incidence of Fusion")
39
- safety_metric_1 = gr.Textbox(lines=1, label="identify Key Safety Outcome 1",placeholder="e.g. Incidence of Revision")
40
- safety_metric_2 = gr.Textbox(lines=1, label="identify Key Safety Outcome 2",placeholder="e.g. Incidence of Nonunion")
41
-
42
- device_options["secondary extraction"] = gr.Checkbox(label="Will a secondary extraction with device stratification be required?",interactive=True)
43
- device_options["secondary extraction count"] = gr.Slider(minimum=0, maximum=10, step=1, label="How many secondary extractions with device stratification be required?",interactive=True)
44
-
45
- gr.Markdown("<hr>")
46
- with gr.Row():
47
  with gr.Column():
48
- gr.Markdown("## Literature Report")
49
- with gr.Column():
50
- bt_refresh = gr.Button(value="Refresh",variant="primary")
51
-
52
- gr.Markdown("<hr>")
53
-
54
- with gr.Row():
55
-
56
- with gr.Column(scale=2):
57
- overview = gr.Markdown("")
58
-
59
- with gr.Column(scale=3):
60
- details = gr.Markdown("")
61
 
62
- btn_reset.click(
63
- reset,
64
- outputs=[
65
- upload_study,
66
- input_study,
67
- performance_metric_1,
68
- performance_metric_2,
69
- safety_metric_1,
70
- safety_metric_2,
71
- device_options["secondary extraction"],
72
- device_options["secondary extraction count"],
73
- overview,
74
- details
75
- ]
76
- )
77
 
78
  btn_add_study.click(
79
  process_study,
80
  inputs=[
81
  upload_study,
82
  input_study,
83
- performance_metric_1,
84
- performance_metric_2,
85
- safety_metric_1,
86
- safety_metric_2
87
  ],
88
  outputs=[
89
- overview,
90
- details
91
  ],
92
  )
93
 
94
- bt_refresh.click(
95
  refresh,
96
  outputs=[
97
- overview,
98
- details
99
- ]
100
  )
 
1
  import gradio as gr
2
 
 
3
  from utility import *
4
  from application import *
5
  from features import *
6
 
7
  def reset():
8
+ '''
9
+ reset gradio input and output features in this page.
10
+ '''
11
  return (
12
  gr.Files.update(value=None),
13
  gr.TextArea.update(value=""),
 
 
 
 
 
 
 
 
14
  gr.Markdown.update(value="")
15
  )
16
 
 
19
  with gr.Column():
20
  gr.Markdown("## Studies")
21
  gr.HTML("<hr>")
22
+
23
  upload_study = gr.File(label="Upload a clinical study report",type="file")
24
+
25
  input_study = gr.TextArea(label="Or paste a clinical study report content",placeholder="Paste content here...",lines=5)
26
 
27
  with gr.Row():
28
  btn_reset = gr.Button(value="Reset",variant="stop")
29
  btn_add_study = gr.Button(value="Add",variant="primary")
 
30
 
 
 
 
 
 
 
 
 
 
 
31
  with gr.Column():
32
+ with gr.Row():
33
+ gr.Markdown("## Literature Report")
34
+ btn_refresh = gr.Button(value="Refresh",variant="primary")
35
+ views = gr.Markdown("")
 
 
 
 
 
 
 
 
 
36
 
37
+ btn_reset.click(
38
+ reset,
39
+ outputs=[
40
+ upload_study,
41
+ input_study,
42
+ views,
43
+ ]
44
+ )
 
 
 
 
 
 
 
45
 
46
  btn_add_study.click(
47
  process_study,
48
  inputs=[
49
  upload_study,
50
  input_study,
 
 
 
 
51
  ],
52
  outputs=[
53
+ views,
 
54
  ],
55
  )
56
 
57
+ btn_refresh.click(
58
  refresh,
59
  outputs=[
60
+ views,
61
+ ],
 
62
  )
utility.py CHANGED
@@ -1,5 +1,4 @@
1
  import json
2
- import tempfile
3
 
4
  from application import *
5
  from pdfminer.high_level import extract_text
@@ -143,7 +142,6 @@ def py_dict_to_db_map(py_dict):
143
  db_map[key] = {"BOOL":value}
144
  elif value is None:
145
  db_map[key] = {"NULL":True}
146
-
147
  return db_map
148
 
149
  # convert dynamodb list to python list
 
1
  import json
 
2
 
3
  from application import *
4
  from pdfminer.high_level import extract_text
 
142
  db_map[key] = {"BOOL":value}
143
  elif value is None:
144
  db_map[key] = {"NULL":True}
 
145
  return db_map
146
 
147
  # convert dynamodb list to python list