alexdphan committed on
Commit
85583d7
·
1 Parent(s): 9377734

finished basic classification

Browse files
Files changed (8) hide show
  1. .ipynb_checkpoints/app-checkpoint.ipynb +266 -0
  2. app.ipynb +0 -0
  3. app.py +0 -9
  4. app/app.py +28 -0
  5. cat.jpg +0 -0
  6. dog.jpg +0 -0
  7. dunno.jpg +0 -0
  8. model.pkl +3 -0
.ipynb_checkpoints/app-checkpoint.ipynb ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 149,
6
+ "id": "4fbf20c3",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "#|default_exp app"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "code",
15
+ "execution_count": 150,
16
+ "id": "5cd0900b",
17
+ "metadata": {},
18
+ "outputs": [],
19
+ "source": [
20
+ "!pip install -Uqq fastai"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "code",
25
+ "execution_count": 151,
26
+ "id": "e9fdfb5c",
27
+ "metadata": {},
28
+ "outputs": [],
29
+ "source": [
30
+ "from fastai.vision.all import *\n",
31
+ "import gradio as gr\n",
32
+ "\n",
33
+ "def is_cat(x): return x[0].isupper() "
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": 152,
39
+ "id": "9a9003fb",
40
+ "metadata": {
41
+ "scrolled": true
42
+ },
43
+ "outputs": [],
44
+ "source": [
45
+ "im = PILImage.create('dog.jpg')\n",
46
+ "im.thumbnail((192,192))\n",
47
+ "im"
48
+ ]
49
+ },
50
+ {
51
+ "cell_type": "markdown",
52
+ "id": "7683e0bb",
53
+ "metadata": {},
54
+ "source": [
55
+ "Use #|export to mark the cells that need to be included in the exported Python script"
56
+ ]
57
+ },
58
+ {
59
+ "cell_type": "code",
60
+ "execution_count": 153,
61
+ "id": "b6eafa98",
62
+ "metadata": {},
63
+ "outputs": [],
64
+ "source": [
65
+ "#|export\n",
66
+ "learn = load_learner('model.pkl')"
67
+ ]
68
+ },
69
+ {
70
+ "cell_type": "code",
71
+ "execution_count": 154,
72
+ "id": "c58e6bec",
73
+ "metadata": {
74
+ "scrolled": false
75
+ },
76
+ "outputs": [],
77
+ "source": [
78
+ "%time learn.predict(im)"
79
+ ]
80
+ },
81
+ {
82
+ "cell_type": "markdown",
83
+ "id": "89895bea",
84
+ "metadata": {},
85
+ "source": [
86
+ "### Gradio doesn't accept fastai's tensor outputs directly, so each probability is converted to a plain Python float\n",
87
+ "\n",
88
+ "zip(categories, map(float,probs)): The zip() function takes two iterables as its arguments and returns an iterator of pairs where the first element of each passed iterable is paired together, the second element of each passed iterable is paired together, and so on. In this case, the categories tuple and the iterable of floating-point numbers are passed, resulting in an iterable of pairs with a category (e.g., 'Dog' or 'Cat') as the first element and the corresponding probability as the second element.\n",
89
+ "\n",
90
+ "dict(zip(categories, map(float,probs))): The dict() function takes the iterable of pairs and converts it into a dictionary. The first element of each pair becomes a key in the dictionary, and the second element becomes the corresponding value.\n",
91
+ "\n"
92
+ ]
93
+ },
94
+ {
95
+ "cell_type": "code",
96
+ "execution_count": 155,
97
+ "id": "60943f1e",
98
+ "metadata": {},
99
+ "outputs": [],
100
+ "source": [
101
+ "#|export\n",
102
+ "categories = ('Dog', 'Cat')\n",
103
+ "\n",
104
+ "def classify_image(img):\n",
105
+ " pred,idx,probs = learn.predict(img)\n",
106
+ " return dict(zip(categories, map(float,probs)))"
107
+ ]
108
+ },
109
+ {
110
+ "cell_type": "code",
111
+ "execution_count": 156,
112
+ "id": "faa912af",
113
+ "metadata": {},
114
+ "outputs": [],
115
+ "source": [
116
+ "classify_image(im)"
117
+ ]
118
+ },
119
+ {
120
+ "cell_type": "code",
121
+ "execution_count": 157,
122
+ "id": "b2cc2ec2",
123
+ "metadata": {
124
+ "scrolled": true
125
+ },
126
+ "outputs": [],
127
+ "source": [
128
+ "#|export\n",
129
+ "image = gr.inputs.Image(shape=(192,192))\n",
130
+ "label = gr.outputs.Label()\n",
131
+ "examples = ['dog.jpg', 'cat.jpg', 'dunno.jpg']\n",
132
+ "\n",
133
+ "intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)\n",
134
+ "intf.launch(inline=False)"
135
+ ]
136
+ },
137
+ {
138
+ "cell_type": "code",
139
+ "execution_count": 164,
140
+ "id": "933e4d13",
141
+ "metadata": {},
142
+ "outputs": [],
143
+ "source": [
144
+ "m = learn.model"
145
+ ]
146
+ },
147
+ {
148
+ "cell_type": "code",
149
+ "execution_count": 165,
150
+ "id": "a95e8301",
151
+ "metadata": {},
152
+ "outputs": [],
153
+ "source": [
154
+ "ps = list(m.parameters())"
155
+ ]
156
+ },
157
+ {
158
+ "cell_type": "code",
159
+ "execution_count": 166,
160
+ "id": "dba2620d",
161
+ "metadata": {
162
+ "scrolled": true
163
+ },
164
+ "outputs": [],
165
+ "source": [
166
+ "ps[1]"
167
+ ]
168
+ },
169
+ {
170
+ "cell_type": "code",
171
+ "execution_count": 167,
172
+ "id": "eb9bac04",
173
+ "metadata": {},
174
+ "outputs": [],
175
+ "source": [
176
+ "ps[0].shape"
177
+ ]
178
+ },
179
+ {
180
+ "cell_type": "code",
181
+ "execution_count": 168,
182
+ "id": "9c5971f4",
183
+ "metadata": {},
184
+ "outputs": [],
185
+ "source": [
186
+ "ps[0]"
187
+ ]
188
+ },
189
+ {
190
+ "cell_type": "markdown",
191
+ "id": "3a97f529",
192
+ "metadata": {},
193
+ "source": [
194
+ "### export -"
195
+ ]
196
+ },
197
+ {
198
+ "cell_type": "code",
199
+ "execution_count": 169,
200
+ "id": "0db010b4",
201
+ "metadata": {},
202
+ "outputs": [],
203
+ "source": [
204
+ "from nbdev.export import notebook2script\n",
205
+ "import os\n",
206
+ "print(os.listdir())\n",
207
+ "print('hi')"
208
+ ]
209
+ },
210
+ {
211
+ "cell_type": "code",
212
+ "execution_count": 170,
213
+ "id": "d7c9f03c",
214
+ "metadata": {},
215
+ "outputs": [],
216
+ "source": [
217
+ "notebook2script('app.ipynb')"
218
+ ]
219
+ },
220
+ {
221
+ "cell_type": "code",
222
+ "execution_count": null,
223
+ "id": "cb79a018",
224
+ "metadata": {},
225
+ "outputs": [],
226
+ "source": []
227
+ },
228
+ {
229
+ "cell_type": "code",
230
+ "execution_count": null,
231
+ "id": "9a55ccd6",
232
+ "metadata": {},
233
+ "outputs": [],
234
+ "source": []
235
+ },
236
+ {
237
+ "cell_type": "code",
238
+ "execution_count": null,
239
+ "id": "14ca061f",
240
+ "metadata": {},
241
+ "outputs": [],
242
+ "source": []
243
+ }
244
+ ],
245
+ "metadata": {
246
+ "kernelspec": {
247
+ "display_name": "Python 3 (ipykernel)",
248
+ "language": "python",
249
+ "name": "python3"
250
+ },
251
+ "language_info": {
252
+ "codemirror_mode": {
253
+ "name": "ipython",
254
+ "version": 3
255
+ },
256
+ "file_extension": ".py",
257
+ "mimetype": "text/x-python",
258
+ "name": "python",
259
+ "nbconvert_exporter": "python",
260
+ "pygments_lexer": "ipython3",
261
+ "version": "3.11.2"
262
+ }
263
+ },
264
+ "nbformat": 4,
265
+ "nbformat_minor": 5
266
+ }
app.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
app.py DELETED
@@ -1,9 +0,0 @@
1
- import gradio as gr
2
-
3
-
4
- def greet(name):
5
- return "Hello " + name + "!!"
6
-
7
-
8
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
9
- iface.launch()
 
 
 
 
 
 
 
 
 
 
app/app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AUTOGENERATED! DO NOT EDIT! File to edit: ../app.ipynb.
2
+
3
+ # %% auto 0
4
+ __all__ = ['learn', 'categories', 'image', 'label', 'examples', 'intf', 'is_cat', 'classify_image']
5
+
6
+ # %% ../app.ipynb 2
7
+ from fastai.vision.all import *
8
+ import gradio as gr
9
+
10
+ def is_cat(x): return x[0].isupper()
11
+
12
+ # %% ../app.ipynb 5
13
+ learn = load_learner('model.pkl')
14
+
15
+ # %% ../app.ipynb 8
16
+ categories = ('Dog', 'Cat')
17
+
18
+ def classify_image(img):
19
+ pred,idx,probs = learn.predict(img)
20
+ return dict(zip(categories, map(float,probs)))
21
+
22
+ # %% ../app.ipynb 10
23
+ image = gr.inputs.Image(shape=(192,192))
24
+ label = gr.outputs.Label()
25
+ examples = ['dog.jpg', 'cat.jpg', 'dunno.jpg']
26
+
27
+ intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)
28
+ intf.launch(inline=False)
cat.jpg ADDED
dog.jpg ADDED
dunno.jpg ADDED
model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5cdd3e67bed2c5294c63dd37925d2a6dfecba69da039ad4cb7214508e5f786e0
3
+ size 47063121