AmirV97 commited on
Commit
6cfeaf6
·
1 Parent(s): 0e49ee3

First commit.

Browse files
Files changed (9) hide show
  1. F1.jpg +0 -0
  2. F2.jpg +0 -0
  3. F3.jpg +0 -0
  4. NF1.jpg +0 -0
  5. NF2.jpg +0 -0
  6. NF3.jpg +0 -0
  7. PrHu_model.pth +3 -0
  8. app.py +60 -0
  9. requirements.txt +13 -0
F1.jpg ADDED
F2.jpg ADDED
F3.jpg ADDED
NF1.jpg ADDED
NF2.jpg ADDED
NF3.jpg ADDED
PrHu_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3939b00c41628df6f9a55ba40dbab49fec289067d951a247a126d4186ce374e7
3
+ size 2344394
app.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import tempfile

import albumentations as A
import gradio as gr
import numpy as np
import torch
import transformers  # fix: used below by ConvNextV2Config/ForImageClassification but never imported
from albumentations.pytorch import ToTensorV2
from PIL import Image  # fix: used by inference() but never imported
from torch import nn
from torch.optim.swa_utils import AveragedModel

# preprocessing
# Resize the longest side to 384 px, boost local contrast (CLAHE),
# normalize per-image, zero-pad to a square 384x384 canvas, then convert
# the HWC numpy array to a CHW torch tensor.
transforms = A.Compose([
    A.LongestMaxSize(384),
    A.CLAHE(),
    A.Normalize(normalization='image'),
    A.PadIfNeeded(384, 384, border_mode=0, value=(0)),
    ToTensorV2()
])
# model
class PrHu_model(nn.Module):
    """ConvNeXt-V2 binary classifier for single-channel 384x384 images.

    The head emits a single logit (num_labels=1); callers interpret a
    positive logit as the positive class.
    """

    def __init__(self):
        super().__init__()
        # Small ConvNeXt-V2 backbone sized for grayscale radiographs.
        cfg = transformers.ConvNextV2Config(
            num_channels=1,
            drop_path_rate=0,
            image_size=384,
            num_labels=1,
            depths=[2, 2, 6, 2],
            hidden_sizes=[16, 32, 64, 128],
        )
        self.configuration = cfg
        self.model = transformers.ConvNextV2ForImageClassification(cfg)

    def forward(self, x):
        """Return the raw classification logits for a batch of images."""
        outputs = self.model(x)
        return outputs.logits
# Pick GPU when available. The checkpoint was saved from a DataParallel
# wrapper (state-dict keys carry the "module." prefix), so the model must
# be wrapped the same way before loading the weights.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = nn.DataParallel(PrHu_model()).to(device)
model.load_state_dict(torch.load('PrHu_model.pth', map_location=device))
model.eval()  # fix: was `mode.eval()` (NameError); also disables dropout/BN updates for inference
def inference(img_dir):
    """Classify a single uploaded radiograph.

    Parameters
    ----------
    img_dir : path-like from gr.File pointing at the uploaded image file.

    Returns
    -------
    str
        "Fracture +" if the model's logit is positive, else "Fracture -".
    """
    # Load as single-channel grayscale ("L") to match num_channels=1.
    image = np.array(Image.open(str(img_dir)).convert('L'))
    image = transforms(image=image)['image']
    # fix: add the batch dimension (the model expects NCHW, the transform
    # yields CHW) and move the tensor to the model's device.
    image = image.unsqueeze(0).to(device)
    with torch.inference_mode():
        out = model(image).item()
    # Single-logit binary head: positive logit => fracture present.
    return "Fracture +" if out > 0 else "Fracture -"
# Bundled sample radiographs: NF* = no fracture, F* = fracture.
examples = ["NF1.jpg", "NF2.jpg", "NF3.jpg", "F1.jpg", "F2.jpg", "F3.jpg"]

#UI
upload_widget = gr.File(label="Upload Input Image")
result_widget = gr.Textbox(label="Classification Result")

iface = gr.Interface(
    fn=inference,
    inputs=[upload_widget],
    outputs=result_widget,
    title="Proximal Humerus Fracture Detection",
    description="Upload an image, and get the classification result.",
    examples=examples  # Add example inputs
)

# Launch the interface
iface.launch()
requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ albumentations
2
+ torch
3
+ torchmetrics
4
+ torchvision
5
+ matplotlib
6
+ numpy
7
+ transformers
8
+ scikit-learn
9
+ pandas
10
+ h5py
11
+ tqdm
12
+ gradio
13
+ Pillow