canturan10 committed on
Commit
bc88612
·
1 Parent(s): 225a2d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -28
app.py CHANGED
@@ -1,14 +1,15 @@
1
  import random
2
  from datetime import datetime
3
 
4
- import av
5
  import light_side as ls
6
  import numpy as np
7
  import requests
8
  import streamlit as st
9
  from PIL import Image
10
  from streamlit_image_comparison import image_comparison
11
- from streamlit_webrtc import VideoProcessorBase, webrtc_streamer
 
 
12
 
13
 
14
  def main():
@@ -47,6 +48,10 @@ def main():
47
  # st.sidebar.caption(f"[Hugging Face](https://huggingface.co/spaces/canturan10/light_side)")
48
  st.sidebar.caption(f"[Pypi](https://pypi.org/project/light-side/)")
49
  st.sidebar.caption("")
 
 
 
 
50
  st.sidebar.caption(ls.__copyright__)
51
 
52
  selected_model = st.selectbox(
@@ -58,7 +63,7 @@ def main():
58
  ls.get_model_versions(selected_model),
59
  )
60
 
61
- mode = st.radio("Select Inference Mode", ("Image", "Video (WebRTC)"))
62
 
63
  model = ls.Enhancer.from_pretrained(selected_model, selected_version)
64
  model.eval()
@@ -90,35 +95,41 @@ def main():
90
  label2="Light Side",
91
  )
92
  else:
93
-
94
  st.write(
95
- "If video is not playing, please refresh the page. Depends on your browser and connection, it may take some time to load the video."
96
  )
97
-
98
- class VideoProcessor(VideoProcessorBase):
99
- def recv(self, frame):
100
-
101
- img = frame.to_ndarray(format="bgr24")
102
- results = model.predict(img)[0]
103
- orj_img = results["image"]
104
- enh_img = results["enhanced"]
105
-
106
- return av.VideoFrame.from_ndarray(
107
- np.concatenate((orj_img, enh_img), axis=1), format="bgr24"
108
- )
109
-
110
- ctx = webrtc_streamer(
111
- key="example",
112
- video_processor_factory=VideoProcessor,
113
- rtc_configuration={
114
- "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
115
- },
116
- media_stream_constraints={
117
- "video": True,
118
- "audio": False,
119
- },
120
  )
121
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
  if __name__ == "__main__":
124
 
 
1
  import random
2
  from datetime import datetime
3
 
 
4
  import light_side as ls
5
  import numpy as np
6
  import requests
7
  import streamlit as st
8
  from PIL import Image
9
  from streamlit_image_comparison import image_comparison
10
+
11
+ # import av
12
+ # from streamlit_webrtc import VideoProcessorBase, webrtc_streamer
13
 
14
 
15
  def main():
 
48
  # st.sidebar.caption(f"[Hugging Face](https://huggingface.co/spaces/canturan10/light_side)")
49
  st.sidebar.caption(f"[Pypi](https://pypi.org/project/light-side/)")
50
  st.sidebar.caption("")
51
+ st.sidebar.markdown(
52
+ "[![Support Badge](https://img.shields.io/badge/-buy_me_a%C2%A0coffee-orange?style=for-the-badge&logo=Buy-me-a-coffee&logoColor=white&link=https://canturan10.github.io/)](https://www.buymeacoffee.com/canturan10)"
53
+ )
54
+ st.sidebar.caption("")
55
  st.sidebar.caption(ls.__copyright__)
56
 
57
  selected_model = st.selectbox(
 
63
  ls.get_model_versions(selected_model),
64
  )
65
 
66
+ mode = st.radio("Select Inference Mode", ("Image", "Video"))
67
 
68
  model = ls.Enhancer.from_pretrained(selected_model, selected_version)
69
  model.eval()
 
95
  label2="Light Side",
96
  )
97
  else:
 
98
  st.write(
99
+ "This feature has been suspended for now. Errors may occur during service free of charge due to limited resources. If there is support for the project, I can activate this feature again by increasing the limit."
100
  )
101
+ st.markdown(
102
+ "[![Support Badge](https://img.shields.io/badge/-buy_me_a%C2%A0coffee-orange?style=for-the-badge&logo=Buy-me-a-coffee&logoColor=white&link=https://canturan10.github.io/)](https://www.buymeacoffee.com/canturan10)"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  )
104
 
105
+ # st.write(
106
+ # "If video is not playing, please refresh the page. Depends on your browser and connection, it may take some time to load the video."
107
+ # )
108
+
109
+ # class VideoProcessor(VideoProcessorBase):
110
+ # def recv(self, frame):
111
+
112
+ # img = frame.to_ndarray(format="bgr24")
113
+ # results = model.predict(img)[0]
114
+ # orj_img = results["image"]
115
+ # enh_img = results["enhanced"]
116
+
117
+ # return av.VideoFrame.from_ndarray(
118
+ # np.concatenate((orj_img, enh_img), axis=1), format="bgr24"
119
+ # )
120
+
121
+ # ctx = webrtc_streamer(
122
+ # key="example",
123
+ # video_processor_factory=VideoProcessor,
124
+ # rtc_configuration={
125
+ # "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
126
+ # },
127
+ # media_stream_constraints={
128
+ # "video": True,
129
+ # "audio": False,
130
+ # },
131
+ # )
132
+
133
 
134
  if __name__ == "__main__":
135