Singularity666 committed on
Commit
13c4a71
·
1 Parent(s): 51c45e2

Update_app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -79
app.py CHANGED
@@ -1,92 +1,51 @@
 
 
 
 
1
  import streamlit as st
2
  import requests
3
  import io
4
 
5
 
6
  # Designing the interface
7
- st.title("Medical Image Captioning")
8
 
9
- st.sidebar.markdown(
10
- """
11
- This project features 3 different Medical image captioning models.
12
- Two of the use the InceptionV3 architecture to do feature extraction and then generate the captions using an LSTM model.
13
- The difference between these two is that the first one uses InceptionV3 trained on ImageNet data and outputs 2048 features.
14
- The second one is based on a retrained version of InceptionV3 that uses the CUI data from the ROCO dataset to extract 745 features from the images.
15
- The final model is transformer based on...
16
- """
17
- )
18
 
19
  with st.spinner('Loading objects ...'):
20
  from model import *
21
 
22
  random_image_id = get_random_image_id()
23
 
24
- st.sidebar.title("Select a sample image")
25
- sample_image_id = st.sidebar.selectbox(
26
- "Please choose a sample image",
27
- sample_image_ids
28
- )
29
-
30
- st.sidebar.title("Select a model Type")
31
- model_type = st.sidebar.selectbox(
32
- "Please choose a model",
33
- ['Pretrained Inception', 'Retrained Inception', 'Transformer']
34
- )
35
-
36
- inception, lstm = fetch_model(model_type)
37
- word2Index, index2Word, variable_params = fetch_auxiliary_files(model_type)
38
- max_len = variable_params['max_caption_len']
39
-
40
- if st.sidebar.button("Random ROCO (test) images"):
41
- random_image_id = get_random_image_id()
42
- sample_image_id = "None"
43
-
44
- bytes_data = None
45
- with st.sidebar.form("file-uploader-form", clear_on_submit=True):
46
- uploaded_file = st.file_uploader("Choose a file")
47
- submitted = st.form_submit_button("Upload")
48
- if submitted and uploaded_file is not None:
49
- bytes_data = io.BytesIO(uploaded_file.getvalue())
50
-
51
- if (bytes_data is None) and submitted:
52
-
53
- st.write("No file is selected to upload")
54
-
55
- else:
56
-
57
- image_id = random_image_id
58
- if sample_image_id != "None":
59
- assert type(sample_image_id) == int
60
- image_id = sample_image_id
61
-
62
- sample_name = f"ROCO_{str(image_id).zfill(5)}.jpg"
63
- sample_path = os.path.join(sample_dir, sample_name)
64
-
65
- if bytes_data is not None:
66
- image = Image.open(bytes_data)
67
- elif os.path.isfile(sample_path):
68
- image = Image.open(sample_path)
69
-
70
- width, height = 299, 299
71
- resized = image.resize(size=(width, height))
72
-
73
- if bytes_data is None:
74
- st.markdown(f"ROCO_{str(image_id).zfill(5)}.jpg")
75
- show = st.image(resized)
76
- show.image(resized, '\n\nSelected Image')
77
-
78
- # For newline
79
- st.sidebar.write('\n')
80
-
81
- with st.spinner('Generating image caption ...'):
82
- st.header(f'Predicted caption:\n\n')
83
-
84
- preprocessed_img = preprocess_image_inception(resized)
85
- features = extract_features(inception, preprocessed_img)
86
- caption = generate_caption(lstm, features, max_len, word2Index, index2Word)
87
- st.subheader(caption)
88
-
89
- st.sidebar.header("Model predicts: ")
90
- st.sidebar.write(f"{caption}")
91
-
92
- image.close()
 
1
# app.py (post-change revision) — RadiXGPT: caption a random ROCO test image
# with the transformer captioning model and display the result in Streamlit.

import io
import os

import numpy as np
import pandas as pd
import requests
import streamlit as st
from PIL import Image

# Designing the interface
st.title("RadiXGPT")

with st.spinner('Loading objects ...'):
    # `model` supplies get_random_image_id, sample_dir, fetch_model,
    # fetch_auxiliary_files, preprocess_image_inception, extract_features,
    # generate_caption (and possibly `inception` — see NOTE below).
    from model import *

random_image_id = get_random_image_id()

sample_name = f"ROCO_{str(random_image_id).zfill(5)}.jpg"
sample_path = os.path.join(sample_dir, sample_name)

# BUG FIX: the file handle is now released even when captioning raises;
# previously `image.close()` only ran on the happy path.
with Image.open(sample_path) as image:
    # InceptionV3 expects 299x299 input.
    width, height = 299, 299
    resized = image.resize(size=(width, height))
    st.markdown(f"ROCO_{str(random_image_id).zfill(5)}.jpg")
    show = st.image(resized)
    show.image(resized, '\n\nSelected Image')

    # For newline
    st.sidebar.write('\n')

    with st.spinner('Generating image caption ...'):
        st.header('Predicted caption:\n\n')  # was an f-string with no placeholders

        # Load the transformer model and its auxiliary files before any of
        # them are needed, so all dependencies are resolved up front.
        transformer, _, _ = fetch_model('Transformer')
        word2Index, index2Word, variable_params = fetch_auxiliary_files('Transformer')
        max_len = variable_params['max_caption_len']

        preprocessed_img = preprocess_image_inception(resized)
        # NOTE(review): `inception` is never assigned in this revision — this
        # raises NameError unless `from model import *` exports it. The previous
        # revision obtained it via `fetch_model(...)`; confirm against model.py.
        features = extract_features(inception, preprocessed_img)

        # Generate the caption
        caption = generate_caption(transformer, features, max_len, word2Index, index2Word)

        st.subheader(caption)

        st.sidebar.header("Model predicts: ")
        st.sidebar.write(f"{caption}")