Spaces:
Sleeping
Sleeping
Jon Taylor
committed on
Commit
·
e606d44
1
Parent(s):
6776a75
1fps streaming hooray
Browse files- app/bot.py +7 -5
- app/pipeline.py +5 -5
app/bot.py
CHANGED
|
@@ -86,7 +86,7 @@ class DailyVision(EventHandler):
|
|
| 86 |
self.__camera = Daily.create_camera_device("camera",
|
| 87 |
width = video_frame.width,
|
| 88 |
height = video_frame.height,
|
| 89 |
-
color_format="
|
| 90 |
self.__client.update_inputs({
|
| 91 |
"camera": {
|
| 92 |
"isEnabled": True,
|
|
@@ -97,6 +97,8 @@ class DailyVision(EventHandler):
|
|
| 97 |
})
|
| 98 |
|
| 99 |
def process_frames(self):
|
|
|
|
|
|
|
| 100 |
while not self.__app_quit:
|
| 101 |
# Is anyone watching?
|
| 102 |
if not self.__idle and len(self.__client.participants()) < 2:
|
|
@@ -113,16 +115,16 @@ class DailyVision(EventHandler):
|
|
| 113 |
|
| 114 |
if video_frame:
|
| 115 |
image = Image.frombytes("RGBA", (video_frame.width, video_frame.height), video_frame.buffer)
|
| 116 |
-
|
|
|
|
| 117 |
#pil = Image.fromarray(result.render()[0], mode="RGB").tobytes()
|
| 118 |
-
|
| 119 |
-
self.__camera.write_frame(image.tobytes())
|
| 120 |
except queue.Empty:
|
| 121 |
pass
|
| 122 |
|
| 123 |
def on_video_frame(self, participant_id, video_frame):
|
| 124 |
# Process ~15 frames per second (considering incoming frames at 30fps).
|
| 125 |
-
if time.time() - self.__time > 0.05:
|
| 126 |
self.__time = time.time()
|
| 127 |
self.setup_camera(video_frame)
|
| 128 |
self.__queue.put(video_frame)
|
|
|
|
| 86 |
self.__camera = Daily.create_camera_device("camera",
|
| 87 |
width = video_frame.width,
|
| 88 |
height = video_frame.height,
|
| 89 |
+
color_format="RGB")
|
| 90 |
self.__client.update_inputs({
|
| 91 |
"camera": {
|
| 92 |
"isEnabled": True,
|
|
|
|
| 97 |
})
|
| 98 |
|
| 99 |
def process_frames(self):
|
| 100 |
+
params = Pipeline.InputParams()
|
| 101 |
+
|
| 102 |
while not self.__app_quit:
|
| 103 |
# Is anyone watching?
|
| 104 |
if not self.__idle and len(self.__client.participants()) < 2:
|
|
|
|
| 115 |
|
| 116 |
if video_frame:
|
| 117 |
image = Image.frombytes("RGBA", (video_frame.width, video_frame.height), video_frame.buffer)
|
| 118 |
+
result_image = self.__pipeline.predict(params, image).convert("RGB")
|
| 119 |
+
self.__camera.write_frame(result_image.tobytes())
|
| 120 |
#pil = Image.fromarray(result.render()[0], mode="RGB").tobytes()
|
| 121 |
+
#self.__camera.write_frame(result_image.tobytes())
|
|
|
|
| 122 |
except queue.Empty:
|
| 123 |
pass
|
| 124 |
|
| 125 |
def on_video_frame(self, participant_id, video_frame):
|
| 126 |
# Process ~15 frames per second (considering incoming frames at 30fps).
|
| 127 |
+
if time.time() - self.__time > 1: #0.05:
|
| 128 |
self.__time = time.time()
|
| 129 |
self.setup_camera(video_frame)
|
| 130 |
self.__queue.put(video_frame)
|
app/pipeline.py
CHANGED
|
@@ -51,10 +51,10 @@ class Pipeline:
|
|
| 51 |
1, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
|
| 52 |
)
|
| 53 |
width: int = Field(
|
| 54 |
-
|
| 55 |
)
|
| 56 |
height: int = Field(
|
| 57 |
-
|
| 58 |
)
|
| 59 |
guidance_scale: float = Field(
|
| 60 |
1.0,
|
|
@@ -181,8 +181,8 @@ class Pipeline:
|
|
| 181 |
|
| 182 |
self.pipe(
|
| 183 |
prompt="warmup",
|
| 184 |
-
image=[Image.new("RGB", (
|
| 185 |
-
control_image=[Image.new("RGB", (
|
| 186 |
)
|
| 187 |
|
| 188 |
def predict(self, params: "Pipeline.InputParams", image) -> Image.Image:
|
|
@@ -222,7 +222,7 @@ class Pipeline:
|
|
| 222 |
return None
|
| 223 |
result_image = results.images[0]
|
| 224 |
|
| 225 |
-
if os.getenv("CONTROL_NET_OVERLAY"):
|
| 226 |
# paste control_image on top of result_image
|
| 227 |
w0, h0 = (200, 200)
|
| 228 |
control_image = control_image.resize((w0, h0))
|
|
|
|
| 51 |
1, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
|
| 52 |
)
|
| 53 |
width: int = Field(
|
| 54 |
+
640, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
|
| 55 |
)
|
| 56 |
height: int = Field(
|
| 57 |
+
480, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
|
| 58 |
)
|
| 59 |
guidance_scale: float = Field(
|
| 60 |
1.0,
|
|
|
|
| 181 |
|
| 182 |
self.pipe(
|
| 183 |
prompt="warmup",
|
| 184 |
+
image=[Image.new("RGB", (640, 480))],
|
| 185 |
+
control_image=[Image.new("RGB", (640, 480))],
|
| 186 |
)
|
| 187 |
|
| 188 |
def predict(self, params: "Pipeline.InputParams", image) -> Image.Image:
|
|
|
|
| 222 |
return None
|
| 223 |
result_image = results.images[0]
|
| 224 |
|
| 225 |
+
if os.getenv("CONTROL_NET_OVERLAY", True):
|
| 226 |
# paste control_image on top of result_image
|
| 227 |
w0, h0 = (200, 200)
|
| 228 |
control_image = control_image.resize((w0, h0))
|