balthou committed on
Commit
ec40011
·
1 Parent(s): c00697b

update doc / comments

Browse files
Files changed (1) hide show
  1. app.py +51 -22
app.py CHANGED
@@ -7,16 +7,19 @@ from interactive_pipe import (
7
  )
8
  from transformers import pipeline
9
 
10
- # -------------------
11
- # Processing blocks
12
- # -------------------
13
 
14
 
15
  @interactive(
16
- background_color=("green", ["green", "blue", "red"]), # dropdown menu (str)
17
- border_size=(0.05, [0., 0.3]), # continuous slider (float)
18
  )
19
- def generate_background(background_color="green", border_size: float = 0.) -> np.ndarray:
 
 
 
20
  out = np.zeros((256, 256, 3)) # Initial background set to black
21
  border_int = int(border_size * 256)
22
  out[
@@ -28,17 +31,17 @@ def generate_background(background_color="green", border_size: float = 0.) -> np
28
 
29
 
30
  @interactive(
31
- radius=(0.005, [0., 0.01]), # continuous slider (float)
32
- spread=(1., [0., 2.]), # continuous slider (float)
33
  geometric_shape=("snooker", ["snooker", "circle", "traffic light"]),
34
  )
35
  def add_circles(
36
  background: np.ndarray,
37
- radius: float = 0.,
38
- spread: float = 1.,
39
- geometric_shape: str = "snooker",
40
  ) -> np.ndarray:
41
- out = background.copy()
42
  x, y = np.meshgrid(
43
  np.linspace(-1, 1, out.shape[1]), np.linspace(-1, 1, out.shape[0]))
44
  balls = [
@@ -51,14 +54,14 @@ def add_circles(
51
  ((0.075, -0.725), [1, 0, 0]),
52
  ]
53
  circle_clock = [
54
- ((np.cos(angle), np.sin(angle)), [1, 1, 0]) for angle in np.linspace(0, 2*np.pi, 12)
 
55
  ]
56
  traffic_light = [
57
  ((0.0, 0.0), [1, 0.8, 0]),
58
  ((0.0, 0.12), [0, 1, 0]),
59
  ((0.0, -0.12), [1, 0, 0])
60
  ]
61
-
62
  chosen_pattern = {"circle": circle_clock, "snooker": balls,
63
  "traffic light": traffic_light}[geometric_shape]
64
  for (cx, cy), color in chosen_pattern:
@@ -84,24 +87,43 @@ def add_details(img: np.ndarray, add_stick: bool = False) -> np.ndarray:
84
  seed=(42, [-1, 100])
85
  )
86
  def add_noise(img: np.ndarray, noise_level: float = 0., seed: int = 42):
87
- np.random.seed(seed)
 
 
 
 
88
  return (img + np.random.normal(0, noise_level, img.shape)).clip(0., 1.)
89
 
90
 
91
  @interactive(detect=(True, "Enable classification"))
92
- def apply_classifier(img, context={}, detect=False):
93
- result = {}
 
 
 
94
  if detect:
95
  if not context.get("clf", None):
96
  context["clf"] = pipeline(
97
- "image-classification", model="google/vit-base-patch16-224")
 
 
 
 
98
  result = context["clf"](Image.fromarray((img*255).astype(np.uint8)))
99
  else:
100
  result = [{"score": 0., "label": "No classification"}]*5
 
 
 
 
 
 
101
  context["result"] = result
102
 
103
 
104
- def display_result(context={}):
 
 
105
  result_dict = context.get("result", [])
106
  curves = [
107
  SingleCurve(
@@ -122,7 +144,7 @@ def display_result(context={}):
122
  # -------------------
123
 
124
 
125
- def tutorial_pipeline():
126
  background = generate_background()
127
  foreground = add_details(background)
128
  foreground = add_circles(foreground)
@@ -132,6 +154,10 @@ def tutorial_pipeline():
132
  return [[background, foreground], [noisy_input, result_curve]]
133
 
134
 
 
 
 
 
135
  if __name__ == "__main__":
136
  BACKEND_OPTIONS = ["gradio", "qt", "mpl"]
137
  parser = argparse.ArgumentParser()
@@ -140,5 +166,8 @@ if __name__ == "__main__":
140
  args = parser.parse_args()
141
  md_description = "# 🔍 EXAMPLE Interactive-pipe + machine learning \n"
142
  md_description += "```python\n"+open(__file__, 'r').read()+"```\n"
143
- interactive_pipeline(gui=args.backend, markdown_description=md_description)(
144
- tutorial_pipeline)()
 
 
 
 
7
  )
8
  from transformers import pipeline
9
 
10
+ # ----------------------------
11
+ # Processing blocks definition
12
+ # ----------------------------
13
 
14
 
15
  @interactive(
16
+ background_color=("green", ["green", "blue", "red"]),
17
+ border_size=(0.05, [0., 0.3]),
18
  )
19
+ def generate_background(
20
+ background_color: str = "green", # dropdown menu (str)
21
+ border_size: float = 0. # continuous slider (float)
22
+ ) -> np.ndarray:
23
  out = np.zeros((256, 256, 3)) # Initial background set to black
24
  border_int = int(border_size * 256)
25
  out[
 
31
 
32
 
33
  @interactive(
34
+ radius=(0.005, [0., 0.01]),
35
+ spread=(1., [0., 2.]),
36
  geometric_shape=("snooker", ["snooker", "circle", "traffic light"]),
37
  )
38
  def add_circles(
39
  background: np.ndarray,
40
+ radius: float = 0., # continuous slider (float)
41
+ spread: float = 1., # continuous slider (float)
42
+ geometric_shape: str = "snooker", # dropdown menu (str)
43
  ) -> np.ndarray:
44
+ out = background.copy() # Perform a copy to avoid inplace modifications!
45
  x, y = np.meshgrid(
46
  np.linspace(-1, 1, out.shape[1]), np.linspace(-1, 1, out.shape[0]))
47
  balls = [
 
54
  ((0.075, -0.725), [1, 0, 0]),
55
  ]
56
  circle_clock = [
57
+ ((0.7*np.cos(angle), 0.7*np.sin(angle)), [1, 1, 0])
58
+ for angle in np.linspace(0, 2*np.pi, 12)
59
  ]
60
  traffic_light = [
61
  ((0.0, 0.0), [1, 0.8, 0]),
62
  ((0.0, 0.12), [0, 1, 0]),
63
  ((0.0, -0.12), [1, 0, 0])
64
  ]
 
65
  chosen_pattern = {"circle": circle_clock, "snooker": balls,
66
  "traffic light": traffic_light}[geometric_shape]
67
  for (cx, cy), color in chosen_pattern:
 
87
  seed=(42, [-1, 100])
88
  )
89
  def add_noise(img: np.ndarray, noise_level: float = 0., seed: int = 42):
90
+ if seed > 0:
91
+ # If you do not set the seed, the noise will be different at each call
92
+ # So changing any slider value will change the noise pattern...
93
+ # This is something you want to avoid in practice in graphical user interfaces!
94
+ np.random.seed(seed)
95
  return (img + np.random.normal(0, noise_level, img.shape)).clip(0., 1.)
96
 
97
 
98
  @interactive(detect=(True, "Enable classification"))
99
+ def apply_classifier(
100
+ img: np.ndarray,
101
+ context: dict = {},
102
+ detect: bool = False
103
+ ) -> None:
104
  if detect:
105
  if not context.get("clf", None):
106
  context["clf"] = pipeline(
107
+ "image-classification",
108
+ model="google/vit-base-patch16-224"
109
+ )
110
+ # Context is used to store the classification pipeline
111
+ # and avoid reloading it.
112
  result = context["clf"](Image.fromarray((img*255).astype(np.uint8)))
113
  else:
114
  result = [{"score": 0., "label": "No classification"}]*5
115
+ # Context is shared between all interactive blocks.
116
+ # We also store the classification result inside this dictionary
117
+ # We do not return classification results.
118
+ # as these are not image/audio buffers!
119
+ # In display_result, we'll show some curves based
120
+ # on the classification results.
121
  context["result"] = result
122
 
123
 
124
+ def display_result(context: dict = {}) -> Curve:
125
+ # Context is shared between all interactive blocks.
126
+ # We can access the classification result here.
127
  result_dict = context.get("result", [])
128
  curves = [
129
  SingleCurve(
 
144
  # -------------------
145
 
146
 
147
+ def classification_tutorial_pipeline():
148
  background = generate_background()
149
  foreground = add_details(background)
150
  foreground = add_circles(foreground)
 
154
  return [[background, foreground], [noisy_input, result_curve]]
155
 
156
 
157
+ # ----------------------------------------------------------
158
+ # Main:
159
+ # allows choosing backend through the command line `-b qt`
160
+ # ----------------------------------------------------------
161
  if __name__ == "__main__":
162
  BACKEND_OPTIONS = ["gradio", "qt", "mpl"]
163
  parser = argparse.ArgumentParser()
 
166
  args = parser.parse_args()
167
  md_description = "# 🔍 EXAMPLE Interactive-pipe + machine learning \n"
168
  md_description += "```python\n"+open(__file__, 'r').read()+"```\n"
169
+ classification_tutorial_pipeline_interactive = interactive_pipeline(
170
+ gui=args.backend,
171
+ markdown_description=md_description,
172
+ )(classification_tutorial_pipeline)
173
+ classification_tutorial_pipeline_interactive()