File size: 2,248 Bytes
9ac7403
56f7a23
 
 
 
 
 
 
 
2c7205f
56f7a23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81

import streamlit as st
import torch
from PIL import Image
import torchvision.transforms as transforms
from train import UNet
import numpy as np 

# Load the trained model.  map_location='cpu' lets the app start on machines
# without a GPU even if the checkpoint was saved from a CUDA device.
model_path = 'model.pth'
model = UNet(n_channels=3, n_classes=6)
model.load_state_dict(torch.load(model_path, map_location='cpu'))
model.eval()

# Create a Streamlit app
st.title('Aerial Image Segmentation')

# Add a file uploader to the app (accept the common image formats, not only .jpg)
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Force 3-channel RGB: PNG uploads may be RGBA or grayscale, but the
    # model was built with n_channels=3.
    image = Image.open(uploaded_file).convert('RGB')

    # Display the original image
    st.image(image, caption='Uploaded Image.', use_column_width=True)

    # Preprocess the image: resize to the network's input resolution and
    # scale pixel values to [0, 1].
    data_transform = transforms.Compose([
        transforms.Resize((512, 512)),
        transforms.ToTensor(),
    ])
    input_tensor = data_transform(image).unsqueeze(0)  # add a batch dimension

    # Run inference exactly once.  (The original script ran the model twice
    # on the same input, doubling the latency for no benefit.)
    with torch.no_grad():
        output = model(input_tensor)

    # Color map: class index -> display color (RGB).
    color_map = {
        0: np.array([155, 155, 155]),  # Unlabeled
        1: np.array([60, 16, 152]),    # Building
        2: np.array([132, 41, 246]),   # Land
        3: np.array([110, 193, 228]),  # Road
        4: np.array([254, 221, 58]),   # Vegetation
        5: np.array([226, 169, 41]),   # Water
    }
    class_labels = {
        0: 'Unlabeled',
        1: 'Building',
        2: 'Land',
        3: 'Road',
        4: 'Vegetation',
        5: 'Water',
    }

    # Display the class labels and their colors in a sidebar
    for k, v in class_labels.items():
        st.sidebar.markdown(f'<div style="color:rgb{tuple(color_map[k])};">{v}</div>', unsafe_allow_html=True)

    # Postprocess: per-pixel argmax over the class logits gives a (H, W)
    # label map.  .detach() is unnecessary inside torch.no_grad(), and the
    # result is already 2-D so no extra squeeze is needed.
    label_map = torch.argmax(output.squeeze(0), dim=0).cpu().numpy()

    # Paint each class with its color to build the RGB visualization.
    output_rgb = np.zeros((label_map.shape[0], label_map.shape[1], 3), dtype=np.uint8)
    for k, v in color_map.items():
        output_rgb[label_map == k] = v

    # Display the segmented image
    st.image(output_rgb, caption='Segmented Image.', use_column_width=True)