File size: 21,227 Bytes
5de280e bb32635 5de280e bb32635 e4c9c08 5de280e bb32635 e4c9c08 b900916 bb32635 b900916 bb32635 b900916 bb32635 b900916 bb32635 004db8a b900916 bb32635 004db8a bb32635 3bbaa1f 5de280e b900916 e4c9c08 3bbaa1f 004db8a b900916 3bbaa1f b900916 0f8c356 bb32635 b900916 bb32635 004db8a 858a4f7 bb32635 3bbaa1f 004db8a bb32635 3bbaa1f e4c9c08 bb32635 3bbaa1f b900916 004db8a bb32635 004db8a bb32635 364c11a bb32635 364c11a bb32635 004db8a bb32635 004db8a bb32635 5c94939 bb32635 b900916 1f8db9c 858a4f7 bb32635 858a4f7 bb32635 858a4f7 3bbaa1f 1f8db9c 3bbaa1f 5de280e bb32635 3bbaa1f bb32635 3bbaa1f bb32635 3bbaa1f bb32635 1f8db9c bb32635 1f8db9c 5de280e e4c9c08 bb32635 e4c9c08 bb32635 e4c9c08 bb32635 b900916 bb32635 3bbaa1f bb32635 5de280e bb32635 5de280e bb32635 5de280e bb32635 5de280e bb32635 b900916 5de280e bb32635 b900916 004db8a bb32635 b900916 5de280e 1f8db9c bb32635 5de280e bb32635 5de280e 1f8db9c 5de280e bb32635 5de280e bb32635 5de280e bb32635 5de280e 858a4f7 5de280e bb32635 5de280e bb32635 5de280e bb32635 5de280e bb32635 b900916 bb32635 004db8a bb32635 364c11a bb32635 004db8a bb32635 b900916 bb32635 b900916 bb32635 b900916 bb32635 364c11a 1f8db9c 364c11a bb32635 b900916 bb32635 b900916 bb32635 b900916 bb32635 1f8db9c bb32635 1f8db9c bb32635 1f8db9c bb32635 1f8db9c bb32635 004db8a bb32635 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 
201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 | # language default packages
from datetime import datetime
# external packages
import gradio as gr
import asyncio
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
# internal packages
from chains import *
from cloud_db import *
from cloud_storage import *
from supplier import *
from utility import list_dict_to_dict
# get prompts, terms, outputs from the cloud
@terminal_print
def init_app_data():
    '''
    A function to initialize the application data from the cloud backend.
    All the cloud data was saved in the app_data dictionary.
    Parameters
    ----------
    None
    Returns
    -------
    None
    '''
    # re-key the cloud tables into dicts for O(1) lookup by their natural key
    app_data["prompts"] = list_dict_to_dict(get_table("prompts"),key="prompt_name")
    app_data["terms"] = get_table("terms")
    app_data["articles"] = list_dict_to_dict(get_table("articles"),key="name")
    app_data["summary"] = list_dict_to_dict(get_table("summary"),key="term")
    # aggregation instructions ship with the app (local file) rather than the cloud
    # NOTE(review): `json` is not imported at the top of this file — presumably it
    # comes in via one of the wildcard imports; confirm.
    with open(".data/instruction_agg_performance.json","r") as f:
        prompts_agg_json = json.load(f)
    app_data["prompts_agg"] = list_dict_to_dict(prompts_agg_json,key="assessment")
@terminal_print
def get_existing_article(
    article_name,
):
    '''
    Look up a previously loaded article by name, remember it as the
    current article, and build its Gradio views.
    Parameters
    ----------
    article_name : str
        name of the article
    Returns
    -------
    tuple
        (overview update, detail views update) for the Gradio UI
    '''
    selected = app_data["articles"][article_name]
    app_data["current_article"] = selected
    overview_view = create_overview(selected)
    detail_view = create_detail_views(selected)
    return overview_view, detail_view
@terminal_print
def generate_summary():
    # NOTE(review): unfinished stub — it reads the user's selected summary
    # articles but performs no summarization yet.
    articles = app_data["user"]["summary"]["articles"]
    pass
@terminal_print
def process_study( # need revision
    domain,
    study_file_obj,
    study_content,
):
    '''
    Ingest a study (uploaded file or raw text), run the segmentation and
    prompt-extraction pipeline on it, and return the rendered views.
    Parameters
    ----------
    domain : str
        subject domain of the study
    study_file_obj : file object or None
        uploaded file; takes precedence over raw text
    study_content : str or None
        raw article text, used when no file is supplied
    Returns
    -------
    tuple
        (overview, detail_views) Gradio updates; a pair of error strings
        when neither a file nor content was provided
    '''
    if study_file_obj:
        article = add_article(domain,study_file_obj)
    elif study_content:
        article = add_article(domain,study_content,file_object=False)
    else:
        # BUGFIX: the success path returns two values, but the error path used
        # to return three — callers unpacking a pair would break.
        return "No file or content provided","No file or content provided"
    # update the common article segment from its existing attributes.
    update_article_segment(article)
    # perform pathway logic and content extraction
    process_prompts(article=article)
    # perform a post process for perfFUTables
    post_process(article)
    # set the current article to the completed article object
    app_data["current_article"] = article
    app_data["articles"][article["name"]] = article
    # update the article to the cloud; a failed cloud sync should not lose
    # the locally completed extraction, so the error is printed and swallowed
    try:
        update_article(article)
    except Exception as e:
        print(e)
    # create overview and detail markdown views for the article
    detail_views = create_detail_views(article)
    overview = create_overview(article)
    return overview, detail_views
@terminal_print
def process_studies(
    domain,
    file_objs):
    '''Run process_study on each uploaded file, then refresh the article table view.'''
    for uploaded in file_objs:
        process_study(domain, uploaded, None)
    return gr.update(value=create_md_tables(app_data["articles"]))
@terminal_print
def create_md_tables(articles):
    '''
    create markdown tables for the articles.
    '''
    header = "| Article Name | Authors | Domain | Upload Time |\n| --- | --- | --- | --- |\n"
    rows = [
        f"| {name} | {article['Authors']} |{article['domain']} | {article['upload_time']} | \n"
        for name, article in articles.items()
    ]
    return header + "".join(rows)
@terminal_print
def update_article_segment(article):
    '''
    Split the article's raw text into the standard sections (Abstract,
    Introduction, Material and Methods, Results, Meta Content), attach them
    to the article dict in place, and kick off async segment generation.
    Parameters
    ----------
    article : dict
        article object; mutated in place
    Returns
    -------
    None
    '''
    # get the key content between article objective and discussion
    raw_content = article["raw"]
    index_discussion = raw_content.lower().index("discussion") if "discussion" in raw_content.lower() else len(raw_content)
    # get the meta data
    meta_content = raw_content[:index_discussion]
    abstract, next_content = get_key_content(raw_content,"objective","key") # article Liu does not have objective and key but has introduction.
    introduction, next_content = get_key_content(next_content,"key","methods")
    materials_and_methods, next_content = get_key_content(next_content,"methods","results")
    results, _ = get_key_content(next_content,"results","discussion")
    # update the article object
    article.update({
        "Abstract": abstract,
        "Introduction": introduction,
        "Material and Methods": materials_and_methods,
        "Results": results,
        "Meta Content": meta_content,
        "tables": ""
    })
    # add the key content as an aggregation of the other sections
    article.update({
        # "key_content": article["Abstract"] + article["Introduction"] + article["Material and Methods"] + article["Results"],
        "key_content": article["Abstract"] + article["Material and Methods"] + article["Results"],
    })
    # add the recognized logic to the article
    article.update(identify_logic(article["key_content"]))
    # one thing to notice here, due to the fact that update_article_segment function perform direct change on the article object,
    # there is no need to re-assign the article object to the same variable name
    try:
        # preferred path: run the async segment generation on a fresh loop
        # (NOTE(review): `article_prompts` is not defined in this file —
        # presumably a wildcard import; confirm)
        pre_loop = asyncio.new_event_loop()
        pre_loop.run_until_complete(get_segments(article,article_prompts))
        pre_loop.close()
    except:
        # NOTE(review): bare except hides the real failure, and calling
        # asyncio.gather without awaiting it / running the loop means this
        # fallback likely never executes the tasks — confirm and narrow.
        pre_loop = asyncio.get_event_loop()
        tasks = []
        tasks.append(get_segments(article,article_prompts))
        asyncio.gather(*tasks,return_exceptions=True)
@aterminal_print # need to review this.
async def gen_segment(article,name,chain):
    '''Run one prompt chain and store its text output on the article under `name`.'''
    response = await chain.ainvoke({"term":""})
    article[name] = response.content #["content"]
@aterminal_print # need to review this.
async def get_segments(article,prompts):
    '''
    Fan out one LLM call per prompt and run them concurrently, writing
    each result onto the article under the prompt's name.
    '''
    model = ChatOpenAI(
        temperature=0.0,
        model_name="gpt-3.5-turbo-16k",
        openai_api_key=openai.api_key)
    pending = []
    for segment_name, instruction in prompts.items():
        # the article's meta content is the "human" turn; the instruction rides
        # on the system turn
        template = ChatPromptTemplate.from_messages([
            ("human",article["Meta Content"]),
            ("system","From the text above "+instruction),
        ])
        pending.append(gen_segment(article, segment_name, template | model))
    await asyncio.gather(*pending)
@terminal_print
def refresh():
    '''
    Reload the application data from the cloud backend, re-run the prompt
    pipeline on the current article, and rebuild its views.
    Returns
    -------
    tuple
        (overview, detail_views) Gradio updates; a pair of error strings
        when no article is currently selected
    '''
    init_app_data()
    article = app_data["current_article"]
    if not article:
        # BUGFIX: the success path returns two values; the error branch used
        # to return a single string, breaking callers that unpack a pair.
        return "No file or content provided","No file or content provided"
    process_prompts(article)
    detail_views = create_detail_views(article)
    overview = create_overview(article)
    # persist the refreshed extraction results back to the cloud
    update_article(article=article)
    return overview, detail_views
@terminal_print
def create_overview(article):
    '''
    Render the article's overview extraction components as markdown and
    return a Gradio update carrying the text.
    '''
    assessment = "overview"
    parts = [f"## Overview\n\n"]
    for component in article["extraction"][assessment]:
        parts.append(f"#### {assessment} - {component}\n\n")
        if component in article:
            parts.append(article[component] + "\n\n")
        else:
            parts.append("No content found\n\n")
    return gr.update(value="".join(parts))
@terminal_print
def create_detail_views(article):
    '''
    Render the per-assessment (clinical/radiologic/safety/other) extraction
    components as markdown and return a Gradio update carrying the text.
    '''
    parts = ["## Performance\n\n"]
    for assessment in ("clinical","radiologic","safety","other"):
        if assessment not in article["extraction"]:
            continue
        parts.append(f"### {assessment.capitalize()}\n\n")
        for component in article["extraction"][assessment]:
            parts.append(f"#### {assessment} - {component}\n\n")
            if component in article:
                parts.append(article[component] + "\n\n")
            else:
                parts.append("No content found\n\n")
    return gr.update(value="".join(parts))
@terminal_print
def get_key_content(text:str,start,end:str,case_sensitive:bool=False): # not getting the materials and methods
    '''
    Extract the content between the `start` and `end` markers and return
    both the matched span and the remaining text, so section extractions
    can be chained.
    Parameters
    ----------
    text : str
        text of the article
    start : str or int
        start substring, or an explicit start index
    end : str
        end substring
    case_sensitive : bool, optional
        match the markers case-sensitively, by default False
    Returns
    -------
    tuple of str
        (content between start and end, remaining text from end on); when
        `end` is not found, both elements are text[start:]
    '''
    # BUGFIX: the case_sensitive flag used to be ignored — the text and
    # markers were always lower-cased regardless of its value.
    if not case_sensitive:
        text = text.lower()
        end = end.lower()
        if type(start) is str:
            start = start.lower()
    if type(start) is str:
        start_index = text.find(start)
    else:
        # an integer `start` is taken as an explicit index
        start_index = start
    end_index = text.find(end)
    # if the start is not found, fall back to the beginning of the text
    if start_index == -1:
        start_index = 0
    # if the end is not found, return from start to the end of the text for
    # both the matched span and the remaining text
    # (the old `end_index = 0` assignment here was dead code)
    if end_index == -1:
        return text[start_index:],text[start_index:]
    # return the matched span and the remaining text
    return text[start_index:end_index],text[end_index:]
@terminal_print
def get_articles(update_local=True):
    '''
    this function return the list of articles
    Parameters
    ----------
    update_local : bool, optional
        update the local memory, by default True
    Returns
    -------
    list
        list of articles
    '''
    records = get_table("articles")
    if update_local:
        # NOTE(review): unlike init_app_data, no key= is passed here —
        # confirm list_dict_to_dict's default key matches "name".
        app_data["articles"] = list_dict_to_dict(records)
    return records
@terminal_print
def get_article(domain,name):
    '''
    Fetch a single article record from the cloud table.
    Parameters
    ----------
    domain : str
        subject domain of the article
    name : str
        name of the article
    Returns
    -------
    dict
        article object
    '''
    return get_item("articles",{"domain":domain,"name":name})
@terminal_print
def add_article(domain,file,add_to_s3=True, add_to_local=True, file_object=True):
    '''
    this function receive the domain name and file obj
    and add the article to the cloud, s3 and local memory
    Parameters
    ----------
    domain : str
        subject domain of the article
    file : str or file object
        raw article text, or an uploaded pdf file object
    add_to_s3 : bool, optional
        add article to s3 bucket, by default True
        (NOTE(review): currently unused — uploads happen unconditionally)
    add_to_local : bool, optional
        add article to local memory, by default True
    file_object : bool, optional
        whether `file` is a file object, by default True
    Returns
    -------
    dict
        article object, or the error response from the cloud post
    '''
    if isinstance(file, str):
        content = file
        # NOTE(review): the whole text is also used as the s3 object name —
        # confirm this is intended rather than a derived name.
        filename = file
        upload_file(file,default_s3_bucket,filename)
    else:
        # extract the content from the pdf file
        content, _ = read_pdf(file)
        # normalize Windows and POSIX paths down to the bare file name
        if "\\" in file.name:
            filename = file.name.split("\\")[-1]
        elif "/" in file.name:
            filename = file.name.split("/")[-1]
        else:
            filename = file.name
        # upload the article to s3; `with` guarantees the handle is closed
        # even if the upload raises (the old open/close pair leaked on error)
        with open(file.name, 'rb') as pdf_obj:
            upload_fileobj(pdf_obj,default_s3_bucket,filename)
    article ={
        "domain":domain,
        "name":filename,
        "raw":content,
        "upload_time":datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }
    if add_to_local:
        app_data["articles"][article["name"]]=article
    res = post_item("articles",article)
    if "Error" in res:
        print(res["Error"])
        return res
    return article
@terminal_print
def remove_article(domain,name,remove_from_s3=True, remove_from_local=True):
    '''
    this function remove the article from the cloud, s3 and local memory
    Parameters
    ----------
    domain : str
        subject domain of the article
    name : str
        name of the article
    remove_from_s3 : bool, optional
        remove article from s3 bucket, by default True
    remove_from_local : bool, optional
        remove article from local memory, by default True
    Returns
    -------
    bool
        True on completion
    '''
    # BUGFIX: delete_item was previously called twice (before and after the
    # s3/local cleanup); one call suffices. A stray `pass` was also removed.
    delete_item("articles",{"domain":domain,"name":name})
    if remove_from_s3:
        delete_file(domain,name)
    if remove_from_local:
        del app_data["articles"][name]
    return True
@terminal_print
def update_article(article,file_obj=None,update_local=True):
    '''
    Push an article record back to the cloud, optionally re-uploading its
    file to s3 and refreshing the local cache.
    Parameters
    ----------
    article : dict
        article object
    file_obj : file object, optional
        file object of the article, by default None
    update_local : bool, optional
        update article to local memory, by default True
    Returns
    -------
    dict
        article object
    '''
    if file_obj:
        upload_fileobj(file_obj,article["domain"],article["name"])
    if update_local:
        app_data["articles"][article["name"]] = article
    post_item("articles",article)
    return article
@terminal_print
def identify_logic(text,logic_keywords=logic_keywords,case_sensitive=False):
    '''
    identify_logic function receive the text and return the logic of the article
    Parameters
    ----------
    text : str
        text of the article
    Returns
    -------
    dict
        the type of prompt to be used for the article (groups, preoperative, both or none)
    '''
    if not case_sensitive:
        text = text.lower()
    # count keyword occurrences for each logic dimension
    # NOTE(review): the thresholds differ (>3 for groups, >=3 for
    # preoperatives) — confirm that asymmetry is intentional.
    has_groups = sum(text.count(kw) for kw in logic_keywords["groups"]) > 3
    has_preop = sum(text.count(kw) for kw in logic_keywords["preoperatives"]) >= 3
    prompt_logic = { # map (has_groups, has_preop) onto the prompt suffix
        (True,True):"prompt_p_g",
        (True,False):"prompt_np_g",
        (False,True):"prompt_p_ng",
        (False,False):"prompt_np_ng",
    }
    return {"logic":prompt_logic[(has_groups,has_preop)]}
# lets do it one by one
@terminal_print
def select_overview_prompts(article):
    '''
    Collect the overview prompts whose terms validate against the article,
    record them (ordered by section sequence) on the article's extraction
    map, and return them keyed by prompt name.
    '''
    valid_prompts = set()
    for term in app_data["terms"]:
        if validate_term(article,term,"overview"):
            valid_prompts.update(term["prompts_list"])
    # order the overview sections by their configured sequence number
    ordered = sorted(valid_prompts,key=lambda p:app_data["prompts"][p]["section_sequence"])
    article["extraction"]["overview"] = ordered
    return {p:app_data["prompts"][p] for p in valid_prompts}
@terminal_print
def select_performance_prompts(article,performance_assessment):
    '''
    Collect the prompts for one performance assessment (clinical,
    radiologic, safety or other) whose terms validate against the article.
    Each selected prompt also accumulates the terms that triggered it, and
    its name is recorded on the article's extraction map.
    Parameters
    ----------
    article : dict
        article object; its extraction map is mutated in place
    performance_assessment : str
        the assessment category to select prompts for
    Returns
    -------
    dict
        prompt name -> prompt object
    '''
    # BUGFIX: a `search_text` string used to be built and lower-cased here
    # but was never used — the keyword matching happens inside validate_term.
    valid_terms = []
    for t in app_data["terms"]:
        if validate_term(article,t,performance_assessment):
            valid_terms.append(t)
    valid_prompts = {}
    for t in valid_terms:
        if any([p not in valid_prompts for p in t["prompts_list"]]):
            for p in t["prompts_list"]:
                prompt = app_data["prompts"][p]
                valid_prompts[p] = prompt
                # remember which terms selected this prompt
                if "term" not in valid_prompts[p]:
                    valid_prompts[p]["term"] = {t["term"]:t}
                else:
                    valid_prompts[p]["term"].update({t["term"]:t})
                if performance_assessment not in article["extraction"]:
                    article["extraction"][performance_assessment] = set()
                article["extraction"][performance_assessment].add(prompt["prompt_name"])
    return valid_prompts
@terminal_print
def process_prompts(article): # function overly complicated. need to be simplified.
    '''
    process_prompts function receive the article identify the prompts to be used,
    and traverse through the prompts and article to extract the content from the article
    The prompts were selected based on the terms and the article attributes
    Parameters
    ----------
    article : dict
        article object; its extraction map and extracted segments are
        written in place
    Returns
    -------
    None
    '''
    # start from a clean extraction map; the selection functions repopulate it
    article["extraction"] = {}
    overview_prompts = select_overview_prompts(article)
    performance_assessments = ["clinical","radiologic","safety","other"]
    performance_prompts = {}
    for assessment in performance_assessments:
        performance_prompts[assessment] = select_performance_prompts(article,assessment)
    # run the overview prompts on their own event loop, then one loop per
    # assessment; each loop is closed before the next is created
    overview = asyncio.new_event_loop()
    overview.run_until_complete(execute_concurrent(article,overview_prompts))
    overview.close()
    for assessment in performance_assessments:
        performance = asyncio.new_event_loop()
        performance.run_until_complete(execute_concurrent(article,performance_prompts[assessment]))
        performance.close()
def validate_term(article,term,assessment_step):
    '''
    Decide whether a term applies to the article at the given assessment
    step. Overview terms only need the right anatomic region; performance
    terms additionally require every comma-separated keyword of the term
    to appear in the article's searchable text.
    '''
    # the term must target this article's region (or all regions)
    region = term["region"]
    if region != "all" and region != article["domain"].lower():
        return False
    # overview terms validate on region alone
    if assessment_step == "overview" and term["assessment_step"] == "overview":
        return True
    if term["assessment_step"] != assessment_step:
        return False
    # performance terms: every keyword must occur in the searchable text
    searchable = (article["key_content"]+article["Authors"]+article["Acceptance Month"]+article["Acceptance Year"]+"\n".join(article["tables"]))
    searchable = searchable.replace("/n"," ").lower()
    keywords = [kw.strip().lower() for kw in term["term"].split(",")]
    return all(kw in searchable for kw in keywords)
def _keyword_present(keyword,full_text):
    # True when `keyword` (a string or an arbitrarily nested group of
    # strings) occurs in `full_text`; a group matches when any member does
    if isinstance(keyword,(tuple,list,set)):
        return any(_keyword_present(kw,full_text) for kw in keyword)
    return keyword in full_text
@terminal_print
def keyword_search(keywords,full_text):
    '''
    Check each keyword (or group of alternative keywords) for presence in
    the text.
    Parameters
    ----------
    keywords : iterable
        strings, or tuples of alternative strings (a tuple matches when any
        of its members is found)
    full_text : str
        the text to search in
    Returns
    -------
    dict
        keyword (or tuple of keywords) -> bool
    '''
    keywords_result = {}
    for k in keywords:
        if isinstance(k,(tuple,list,set)):
            # BUGFIX: the old code recursed with a single string (which was
            # then iterated character by character) and treated the returned
            # non-empty dict as truthy, so groups effectively always matched.
            keywords_result[k] = _keyword_present(k,full_text)
        else:
            keywords_result[k] = k in full_text
    return keywords_result
@terminal_print
def execute_prompts(article,prompt):
    '''
    Recursively ensure every input segment this prompt depends on exists on
    the article (running the prompts that produce them first), then run
    this prompt's executor.
    '''
    for dependency in prompt["input_list"]:
        dependency = dependency.strip()
        if dependency not in article:
            # it might be a good idea to add level here.
            execute_prompts(article,app_data["prompts"][dependency])
    run_executor(article,prompt)
@terminal_print
def run_gpt(article,prompt):
    '''
    Build the instruction stream for this prompt (the logic-specific
    instruction plus the reformat instruction), send it to the OpenAI API,
    and store the result on the article under the prompt's name.
    '''
    # pick the instruction matching the article's detected logic pathway
    instructions = [
        prompt[article["logic"]],
        prompt["reformat_inst"]
    ]
    # concatenate the prompt's input segments into a single text block
    text_in = "\n".join(article[i.strip()] for i in prompt["input_list"])
    inst_stream = create_inst(text_in,instructions)
    print(prompt["prompt_name"])
    # send the instruction stream to the openai api and attach the result
    res = send_inst(inst_stream)
    article[prompt["prompt_name"]] = res
@terminal_print
def f_replacement_term(article,prompt):
    '''
    Replace every summary term in the prompt's first input segment with its
    configured replacement and store the result on the article.
    '''
    result = article[prompt["input_list"][0]]
    # BUGFIX: each replacement used to restart from the original text, so
    # only the last term's replacement survived (and `result` was unbound
    # when there were no terms); replacements now accumulate.
    # NOTE(review): app_data["summary"] looks like a dict keyed by term (see
    # init_app_data) — confirm iterating it yields objects indexable by
    # ["term"] rather than plain keys.
    for t in app_data["summary"]:
        result = result.replace(t["term"],t["term_replacement"])
    article[prompt["prompt_name"]] = result
@terminal_print
def f_summary_term(article,prompt):
    '''
    Replace every summary term in the prompt's first input segment with its
    configured summary form and store the result on the article.
    '''
    result = article[prompt["input_list"][0]]
    # BUGFIX: each replacement used to restart from the original text, so
    # only the last term's replacement survived (and `result` was unbound
    # when there were no terms); replacements now accumulate.
    for t in app_data["summary"]:
        result = result.replace(t["term"],t["term_summary"])
    article[prompt["prompt_name"]] = result
@terminal_print
def run_executor(article,prompt):
    '''
    run_executor function receive the text and prompts and select the executor for the text input
    '''
    # dispatch table: executor name -> handler; unknown names are a no-op,
    # matching the original match/case without a wildcard arm
    executors = {
        "gpt-3.5-turbo-16k": run_gpt,
        "f_replacement_term": f_replacement_term,
        "f_summary_term": f_summary_term,
    }
    handler = executors.get(prompt["executed by"])
    if handler is not None:
        handler(article,prompt)
@retry_decorator
@terminal_print
def post_process(article):
    '''
    Aggregate the extracted performance segments of each non-overview
    assessment with its configured LLM instruction and attach the
    aggregated result to the article.
    '''
    # join the extracted segments per assessment; overview is not aggregated
    post_inputs = {}
    for assessment, segment_names in article["extraction"].items():
        if assessment == "overview":
            continue
        post_inputs[assessment] = "\n".join(article[s] for s in segment_names)
    template = ChatPromptTemplate.from_messages([
        ("human","{text}"),
        ("system","From the text above {instruction}"),
    ])
    model = ChatOpenAI(
        temperature=0.0,
        model_name="gpt-3.5-turbo-16k",
        openai_api_key=openai.api_key)
    chain = template | model
    for assessment, joined_text in post_inputs.items():
        agg = app_data["prompts_agg"][assessment]
        article[agg["name"]] = chain.invoke({"text":joined_text,"instruction":agg["chain"][0]}).content
        # record the aggregated segment alongside the raw ones
        article["extraction"][assessment].add(agg["name"])
def add_inst(instructions,prompt):
    '''Concatenate `prompt` onto `instructions` and return the combined result.'''
    combined = instructions + prompt
    return combined