import torch
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
from interface import load_image_from_url, do_generate
# Comma-separated finding labels (CheXpert-style) substituted into the
# {findings} placeholder of the classification prompts below.
# NOTE(review): "pleural Effusion" is capitalized inconsistently with the other
# labels — presumably this matches the model's training vocabulary; confirm
# before normalizing, since the string is sent verbatim to the model.
findings = "enlarged cardiomediastinum, cardiomegaly, lung opacity, lung lesion, edema, consolidation, pneumonia, atelectasis, pneumothorax, pleural Effusion, pleural other, fracture, support devices"
# Per-task prompt templates. Each value is a tuple of sequential user turns
# (chain-of-thought prompting): later turns refer back to the conversation so
# far. "<image>" / {images} placeholders mark where image embeddings go.
templates = {
    "single-image": (
        "radiology image: <image> Which of the following findings are present in the radiology image? Findings: {findings}",
        "Based on the previous conversation, provide a description of the findings in the radiology image.",
    ),
    "multi-image": (
        "radiology images: {images} Which of the following findings are present in the radiology images? Findings: {findings}",
        "Based on the previous conversation, provide a description of the findings in the radiology images.",
    ),
    "multi-study": (
        "prior radiology images: {prior_images}, prior radiology report: {prior_report} follow-up images: {images}, The radiology studies are given in chronological order. Which of the following findings are present in the current follow-up radiology images? Findings: {findings}",
        "Based on the previous conversation, provide a description of the findings in the current follow-up radiology images.",
    ),
    # Single turn: no follow-up question for grounding.
    "visual-grounding": (
        "radiology image: <image> Provide the bounding box coordinate of the region this phrase describes: {phrase}",
    ),
    "summarize": (
        "radiology image: <image> Which of the following findings are present in the radiology image? Findings: {findings}",
        "Based on the previous conversation, provide a description of the findings in the radiology image.",
        "Summarize the description in one concise sentence.",
    ),
}
def do_generate_multi_turn(
    sequential_questions, images, model, processor, generation_config
):
    """Run a multi-turn conversation, generating one assistant reply per question.

    Args:
        sequential_questions: ordered user turns; later turns may refer back to
            earlier answers ("Based on the previous conversation, ...").
        images: images referenced by the "<image>" placeholders in the prompts.
        model/processor/generation_config: HF model, its processor, and the
            generation settings forwarded to ``do_generate``.

    Returns:
        The full chat history as a list of ``{"role", "content"}`` dicts, with
        user and assistant turns interleaved.
    """
    # BUGFIX: previously all questions were appended up front and the model was
    # invoked only once at the end, so follow-up turns that say "Based on the
    # previous conversation" never saw the model's earlier answer. Generate
    # after every user turn instead, feeding the growing history back in.
    chats = []
    for question in sequential_questions:
        chats.append({"role": "user", "content": question})
        # mini-batch size 1: render the whole conversation so far into a prompt
        prompt = processor.apply_chat_template(chats, tokenize=False)
        outputs = do_generate([prompt], images, model, processor, generation_config)
        chats.append({"role": "assistant", "content": outputs[0]})
    return chats
if __name__ == "__main__":
    # Setup constants.
    # Fall back to CPU so the demo still runs on machines without a GPU
    # (the original hard-coded "cuda" and crashed on CPU-only hosts).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dtype = torch.bfloat16
    # Greedy decoding for reproducible demo output.
    do_sample = False

    # Load Processor and Model from the HuggingFace hub.
    processor = AutoProcessor.from_pretrained("Deepnoid/M4CXR-TNNLS", trust_remote_code=True)
    generation_config = GenerationConfig.from_pretrained("Deepnoid/M4CXR-TNNLS")
    # BUGFIX: `do_sample` was previously defined but never applied to the
    # generation configuration.
    generation_config.do_sample = do_sample
    model = AutoModelForCausalLM.from_pretrained(
        "Deepnoid/M4CXR-TNNLS",
        trust_remote_code=True,
        torch_dtype=dtype,
        device_map=device,
    )

    # Example image: a normal PA chest radiograph (public domain).
    image = load_image_from_url(
        "https://upload.wikimedia.org/wikipedia/commons/a/a1/Normal_posteroanterior_%28PA%29_chest_radiograph_%28X-ray%29.jpg"
    )

    # Task 1: single-image medical report generation (CoT Prompting)
    images = [image]
    questions = list(templates["single-image"])
    questions[0] = questions[0].format(findings=findings)
    chats = do_generate_multi_turn(
        questions, images, model, processor, generation_config
    )
    print("=" * 5, "single-image medical report generation", "=" * 5)
    print(chats)

    # Task 2: multi-image medical report generation (CoT Prompting)
    # One "<image>" token per image, joined into the {images} placeholder.
    images = [image, image, image]
    image_tokens = " ".join("<image>" for _ in images)
    questions = list(templates["multi-image"])
    questions[0] = questions[0].format(images=image_tokens, findings=findings)
    chats = do_generate_multi_turn(
        questions, images, model, processor, generation_config
    )
    print("=" * 5, "multi-image medical report generation", "=" * 5)
    print(chats)

    # Task 3: multi-study medical report generation (CoT Prompting)
    # Prior study (images + report) followed by the current follow-up images;
    # the image list is ordered to match the placeholder order in the prompt.
    prior_images = [image, image]
    prior_image_tokens = " ".join("<image>" for _ in prior_images)
    prior_report = "The lungs are clear. There is no pneumothorax."
    follow_up_images = [image, image, image]
    follow_up_image_tokens = " ".join("<image>" for _ in follow_up_images)
    images = prior_images + follow_up_images
    questions = list(templates["multi-study"])
    questions[0] = questions[0].format(
        prior_images=prior_image_tokens,
        prior_report=prior_report,
        images=follow_up_image_tokens,
        findings=findings,
    )
    chats = do_generate_multi_turn(
        questions, images, model, processor, generation_config
    )
    print("=" * 5, "multi-study medical report generation", "=" * 5)
    print(chats)

    # Task 4: visual grounding (single turn: bounding box for a phrase)
    images = [image]
    phrase = "right lower lobe"
    questions = list(templates["visual-grounding"])
    questions[0] = questions[0].format(phrase=phrase)
    chats = do_generate_multi_turn(
        questions, images, model, processor, generation_config
    )
    print("=" * 5, "visual grounding", "=" * 5)
    print(chats)

    # Task 5: summarize (report generation followed by a summarization turn)
    images = [image]
    questions = list(templates["summarize"])
    questions[0] = questions[0].format(findings=findings)
    chats = do_generate_multi_turn(
        questions, images, model, processor, generation_config
    )
    print("=" * 5, "medical report generation & summarize", "=" * 5)
    print(chats)