File size: 3,180 Bytes
dbd339c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import threading
import time

import cv2
import numpy as np
import streamlit as st
from Moildev import Moildev
from PIL import Image
from streamlit_webrtc import webrtc_streamer

# Module-level Moildev instance loaded from the Raspberry Pi camera's
# calibration file; shared by the remapping helpers below.
moildev = Moildev('Raspi_Cam.json')

def brighten_image(image, amount):
    """Return *image* with its brightness shifted by *amount*.

    Positive amounts brighten, negative amounts darken.
    cv2.convertScaleAbs saturates the result to the valid uint8 range.
    """
    return cv2.convertScaleAbs(image, beta=amount)


def anypoint_image(image, alpha, beta, zoom, mode):
    """Remap a fisheye *image* through Moildev's anypoint projection.

    alpha and beta steer the viewing direction (degrees), zoom scales
    the view, and mode selects the projection variant (1 or 2 in this
    app). Delegates to the module-level ``moildev`` instance.
    """
    return moildev.anypoint(image, alpha, beta, zoom, mode)


def main_loop():
    """Render the Streamlit demo UI.

    Offers two sources: a still-image upload pipeline and a live WebRTC
    video pipeline. Both remap the fisheye input through Moildev's
    anypoint projection, controlled by sidebar sliders, and display the
    result in the page. Returns None in all paths (pure UI side effects).
    """
    st.title("MoilDev Demo App")
    st.subheader("This app allows you to play with Image filters!")
    st.text("We use OpenCV and Streamlit for this demo")

    # Choose between a single uploaded image and a live camera stream.
    source = st.sidebar.radio('Sources:', ('image', 'video'))

    if source == 'image':
        # Remap / post-processing controls.
        zoom = st.sidebar.slider("Zoom", min_value=1.0, max_value=3.5, value=1.2)
        alpha = st.sidebar.slider("Alpha", min_value=0.0, max_value=180.0, value=60.0)
        beta = st.sidebar.slider("Beta", min_value=0.0, max_value=180.0, value=60.0)
        brightness_amount = st.sidebar.slider("Brightness", min_value=-50, max_value=50, value=0)

        # st.checkbox returns a bool; checked -> projection mode 1
        # ("Car Mode"), unchecked -> mode 2. The original compared the
        # bool to the int 1, which only worked via bool/int coercion.
        mode_filter = st.sidebar.checkbox('Car Mode')
        mode = 1 if mode_filter else 2

        image_file = st.file_uploader("Upload Your Image", type=['jpg', 'png', 'jpeg'])
        if not image_file:
            return None

        original_image = np.array(Image.open(image_file))

        # Single call instead of two duplicated branches that differed
        # only in the mode argument.
        processed_image = anypoint_image(original_image, alpha=alpha, beta=beta, zoom=zoom, mode=mode)
        processed_image = brighten_image(processed_image, brightness_amount)

        st.text("Original Image vs Processed Image")
        st.image([original_image, processed_image])

    elif source == 'video':
        # Remap controls (no brightness slider for the live stream).
        zoom = st.sidebar.slider("Zoom", min_value=1.0, max_value=3.5, value=1.2)
        alpha = st.sidebar.slider("Alpha", min_value=0.0, max_value=180.0, value=60.0)
        beta = st.sidebar.slider("Beta", min_value=0.0, max_value=180.0, value=60.0)

        mode_filter = st.sidebar.checkbox('Car Mode')
        mode = 1 if mode_filter else 2

        # The webrtc callback runs on a worker thread; hand the latest
        # frame to the render loop through a lock-guarded one-slot dict.
        lock = threading.Lock()
        img_container = {"img": None}

        def video_frame_callback(frame):
            # Stash the newest frame; return it unchanged so the widget's
            # own preview keeps working.
            img = frame.to_ndarray(format="bgr24")
            with lock:
                img_container["img"] = img
            return frame

        ctx = webrtc_streamer(key="example", video_frame_callback=video_frame_callback)
        vsn_place = st.empty()

        while ctx.state.playing:
            # cv2.waitKey needs a HighGUI window/event loop and is
            # unreliable in a headless Streamlit server; a plain sleep
            # throttles the polling loop instead.
            time.sleep(0.01)

            with lock:
                img = img_container["img"]
            if img is None:
                # No frame delivered yet.
                continue

            processed_image = anypoint_image(img, alpha=alpha, beta=beta, zoom=zoom, mode=mode)
            vsn_place.image(processed_image)


# Script entry point: launch the Streamlit app when run directly.
if __name__ == '__main__':
    main_loop()