freddyaboulton HF Staff committed on
Commit
ea524bf
·
verified ·
1 Parent(s): 9336e70

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. README.md +1 -1
  2. run.ipynb +1 -1
  3. run.py +15 -4
README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
5
  colorFrom: indigo
6
  colorTo: indigo
7
  sdk: gradio
8
- sdk_version: 5.49.1
9
  app_file: run.py
10
  pinned: false
11
  hf_oauth: true
 
5
  colorFrom: indigo
6
  colorTo: indigo
7
  sdk: gradio
8
+ sdk_version: 6.0.0
9
  app_file: run.py
10
  pinned: false
11
  hf_oauth: true
run.ipynb CHANGED
@@ -1 +1 @@
1
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_filter"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import cv2 # type: ignore\n", "\n", "def transform_cv2(frame, transform):\n", " if transform == \"cartoon\":\n", " # prepare color\n", " img_color = cv2.pyrDown(cv2.pyrDown(frame))\n", " for _ in range(6):\n", " img_color = cv2.bilateralFilter(img_color, 9, 9, 7)\n", " img_color = cv2.pyrUp(cv2.pyrUp(img_color))\n", "\n", " # prepare edges\n", " img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n", " img_edges = cv2.adaptiveThreshold(\n", " cv2.medianBlur(img_edges, 7),\n", " 255,\n", " cv2.ADAPTIVE_THRESH_MEAN_C,\n", " cv2.THRESH_BINARY,\n", " 9,\n", " 2,\n", " )\n", " img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)\n", " # combine color and edges\n", " img = cv2.bitwise_and(img_color, img_edges)\n", " return img\n", " elif transform == \"edges\":\n", " # perform edge detection\n", " img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)\n", " return img\n", " else:\n", " return np.flipud(frame)\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " transform = gr.Dropdown(choices=[\"cartoon\", \"edges\", \"flip\"],\n", " value=\"flip\", label=\"Transformation\")\n", " input_img = gr.Image(sources=[\"webcam\"], type=\"numpy\")\n", " with gr.Column():\n", " output_img = gr.Image(streaming=True)\n", " dep = input_img.stream(transform_cv2, [input_img, transform], [output_img],\n", " time_limit=30, stream_every=0.1, concurrency_limit=30)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_filter"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import cv2 # type: ignore\n", "\n", "\n", "def transform_cv2(frame, transform):\n", " if transform == \"cartoon\":\n", " # prepare color\n", " img_color = cv2.pyrDown(cv2.pyrDown(frame))\n", " for _ in range(6):\n", " img_color = cv2.bilateralFilter(img_color, 9, 9, 7)\n", " img_color = cv2.pyrUp(cv2.pyrUp(img_color))\n", "\n", " # prepare edges\n", " img_edges = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n", " img_edges = cv2.adaptiveThreshold(\n", " cv2.medianBlur(img_edges, 7),\n", " 255,\n", " cv2.ADAPTIVE_THRESH_MEAN_C,\n", " cv2.THRESH_BINARY,\n", " 9,\n", " 2,\n", " )\n", " img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2RGB)\n", " # combine color and edges\n", " img = cv2.bitwise_and(img_color, img_edges)\n", " return img\n", " elif transform == \"edges\":\n", " # perform edge detection\n", " img = cv2.cvtColor(cv2.Canny(frame, 100, 200), cv2.COLOR_GRAY2BGR)\n", " return img\n", " else:\n", " return np.flipud(frame)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " transform = gr.Dropdown(\n", " choices=[\"cartoon\", \"edges\", \"flip\"],\n", " value=\"flip\",\n", " label=\"Transformation\",\n", " )\n", " input_img = gr.Image(sources=[\"webcam\"], type=\"numpy\")\n", " with gr.Column():\n", " output_img = gr.Image(streaming=True)\n", " dep = input_img.stream(\n", " transform_cv2,\n", " [input_img, transform],\n", " [output_img],\n", " time_limit=30,\n", " stream_every=0.1,\n", " concurrency_limit=30,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
2
  import numpy as np
3
  import cv2 # type: ignore
4
 
 
5
  def transform_cv2(frame, transform):
6
  if transform == "cartoon":
7
  # prepare color
@@ -31,16 +32,26 @@ def transform_cv2(frame, transform):
31
  else:
32
  return np.flipud(frame)
33
 
 
34
  with gr.Blocks() as demo:
35
  with gr.Row():
36
  with gr.Column():
37
- transform = gr.Dropdown(choices=["cartoon", "edges", "flip"],
38
- value="flip", label="Transformation")
 
 
 
39
  input_img = gr.Image(sources=["webcam"], type="numpy")
40
  with gr.Column():
41
  output_img = gr.Image(streaming=True)
42
- dep = input_img.stream(transform_cv2, [input_img, transform], [output_img],
43
- time_limit=30, stream_every=0.1, concurrency_limit=30)
 
 
 
 
 
 
44
 
45
  if __name__ == "__main__":
46
  demo.launch()
 
2
  import numpy as np
3
  import cv2 # type: ignore
4
 
5
+
6
  def transform_cv2(frame, transform):
7
  if transform == "cartoon":
8
  # prepare color
 
32
  else:
33
  return np.flipud(frame)
34
 
35
+
36
  with gr.Blocks() as demo:
37
  with gr.Row():
38
  with gr.Column():
39
+ transform = gr.Dropdown(
40
+ choices=["cartoon", "edges", "flip"],
41
+ value="flip",
42
+ label="Transformation",
43
+ )
44
  input_img = gr.Image(sources=["webcam"], type="numpy")
45
  with gr.Column():
46
  output_img = gr.Image(streaming=True)
47
+ dep = input_img.stream(
48
+ transform_cv2,
49
+ [input_img, transform],
50
+ [output_img],
51
+ time_limit=30,
52
+ stream_every=0.1,
53
+ concurrency_limit=30,
54
+ )
55
 
56
  if __name__ == "__main__":
57
  demo.launch()