Prachidwi committed on
Commit
2818550
·
verified ·
1 Parent(s): 1fa7caf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +107 -5
app.py CHANGED
@@ -1,7 +1,109 @@
1
- import gradio as gr
 
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
 
 
 
 
 
5
 
6
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# The original notebook used "!pip install ..." shell magic, which is a
# SyntaxError in a plain Python file. Install the same packages through
# pip's supported programmatic entry point so app.py stays importable.
# NOTE(review): in a deployed Space these belong in requirements.txt instead.
import subprocess
import sys


def _pip_install(*args):
    """Best-effort `python -m pip install <args>` in the current interpreter."""
    subprocess.run([sys.executable, "-m", "pip", "install", *args], check=False)


_pip_install("-Uqq", "fastbook", "duckduckgo_search")
_pip_install("jupyter_contrib_nbextensions")
_pip_install("nbdev")
4
 
5
# fastbook pulls in the fastai vision stack plus the book's helper utilities
# (urlread, urljson, L, download_images, verify_images, doc, ...) via the
# star imports below.
import os  # was previously provided only implicitly by `from fastbook import *`

import fastbook
fastbook.setup_book()
from fastbook import *
from fastai.vision.widgets import *

# Bing image-search key; falls back to a placeholder when the env var is unset.
key = os.environ.get('AZURE_SEARCH_KEY', 'XXX')

# NOTE(review): the notebook had two bare-name cell expressions here
# (`search_images_bing` and `search_images_ddg`) that merely displayed the
# function objects. As script statements they do nothing and would raise
# NameError if fastbook stopped exporting them, so they were removed.
doc(search_images_ddg)
14
def search_images(term, max_images=200):
    """Scrape DuckDuckGo image search and return up to `max_images` URLs.

    Args:
        term: free-text search query, e.g. "grizzly bear".
        max_images: cap on the number of image URLs returned. Fix: the
            original read an undefined global `max_images`, so every call
            raised NameError; it is now a defaulted parameter.

    Returns:
        A fastcore `L` list of image-URL strings (possibly shorter than
        `max_images` if DuckDuckGo runs out of results).
    """
    url = 'https://duckduckgo.com/'
    # The first request yields the "vqd" token DuckDuckGo requires for i.js.
    res = urlread(url, data={'q': term})
    searchObj = re.search(r'vqd=([\d-]+)\&', res)
    requestUrl = url + 'i.js'
    params = dict(l='us-en', o='json', q=term, vqd=searchObj.group(1),
                  f=',,,', p='1', v7exp='a')
    urls, data = set(), {'next': 1}
    # Page through results until we have enough URLs or there is no next page.
    while len(urls) < max_images and 'next' in data:
        data = urljson(requestUrl, data=params)
        urls.update(L(data['results']).itemgot('image'))
        requestUrl = url + data['next']
        time.sleep(0.2)  # be polite: throttle between result pages
    return L(urls)[:max_images]
27
# Smoke-test the download pipeline on a single known-good image.
ims = ['http://3.bp.blogspot.com/-S1scRCkI3vY/UHzV2kucsPI/AAAAAAAAA-k/YQ5UzHEm9Ss/s1600/Grizzly%2BBear%2BWildlife.jpg']
dest = 'images/grizzly.jpg'
# Fix: download_url does not create parent directories; the original crashed
# when ./images did not already exist.
Path('images').mkdir(exist_ok=True)
download_url(ims[0], dest)
im = Image.open(dest)
im.to_thumb(128, 128)

# The three classes the classifier will distinguish; each gets its own
# sub-folder under ./bears in the download step below.
bear_types = 'grizzly', 'black', 'teddy'
path = Path('bears')
34
+
35
doc(Path)
# Fetch ~150 images per class, but only on the first run: an existing
# ./bears directory is taken to mean the dataset was downloaded before.
if not path.exists():
    path.mkdir()
    for o in bear_types:
        dest = path / o
        dest.mkdir(exist_ok=True)
        results = search_images_ddg(f'{o} bear', 150)
        print(results)
        download_images(dest, urls=results)
44
fns = get_image_files(path)
fns
# verify_images returns the subset of files that fail to open (corrupt or
# truncated downloads are common when scraping).
failed = verify_images(fns)
failed
# ??verify_images
# Fix: the original ran verify_images a second time (redundant and slow) and
# never removed the corrupt files, which would later crash the DataLoaders.
# Unlink the failures instead, as in the fastai book workflow.
failed.map(Path.unlink)
51
class DataLoaders(GetAttr):
    """Minimal train/valid loader container (illustrative, from the fastai book).

    NOTE(review): this deliberately shadows fastai's own `DataLoaders` name
    brought in by `from fastbook import *`. The `bears.dataloaders(...)`
    calls below still return fastai's real class, so this stub is effectively
    unused — kept only because the notebook defined it; consider deleting.
    """
    def __init__(self, *loaders): self.loaders = loaders
    def __getitem__(self, i): return self.loaders[i]
    # Expose self[0] / self[1] as .train / .valid attributes.
    train, valid = add_props(lambda i, self: self[i])
55
# Baseline DataBlock: images in, categories out; 80/20 random split with a
# fixed seed; labels come from the parent folder name; every image resized
# to 128px at the item level.
bears = DataBlock(
    blocks=(ImageBlock, CategoryBlock),
    get_items=get_image_files,
    splitter=RandomSplitter(valid_pct=0.2, seed=42),
    get_y=parent_label,
    item_tfms=Resize(128))

doc(DataBlock)
# Create a DataLoaders object from source
dls = bears.dataloaders(path)
dls.valid.show_batch(max_n=6, nrows=2)

# Compare resize strategies. First: squish to fit.
doc(bears.new)
bears = bears.new(item_tfms=Resize(128, ResizeMethod.Squish))
dls = bears.dataloaders(path)
dls.valid.show_batch(max_n=6, nrows=1)

# Second: pad with zeros to fit.
doc(bears.new)
bears = bears.new(item_tfms=Resize(128, ResizeMethod.Pad, pad_mode='zeros'))
dls = bears.dataloaders(path)
dls.valid.show_batch(max_n=6, nrows=2)

# Third: random resized crops (item-level augmentation).
bears = bears.new(item_tfms=RandomResizedCrop(128, min_scale=0.3))
dls = bears.dataloaders(path)

doc(dls.train.show_batch)

# unique=True shows several augmented variants of the same image.
dls.train.show_batch(max_n=3, nrows=1)
dls.train.show_batch(max_n=3, nrows=1, unique=True)

# Batch-level augmentation via aug_transforms (doubled magnitude for effect).
bears = bears.new(item_tfms=Resize(128), batch_tfms=aug_transforms(mult=2))
dls = bears.dataloaders(path)
dls.train.show_batch(max_n=8, nrows=2, unique=True)

# Final pipeline used for training: random crop plus standard augmentations.
bears = bears.new(
    item_tfms=RandomResizedCrop(128, min_scale=0.3),
    batch_tfms=aug_transforms())

dls = bears.dataloaders(path)

#dls.train.show_batch(max_n=8, nrows=2, unique=True)
93
# Fine-tune an ImageNet-pretrained resnet18 on the bear dataset.
learn = vision_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(4)

# Inspect where the model is confused.
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
interp.plot_top_losses(3, nrows=2)

doc(interp.plot_top_losses)
# Fix: "??interp.plot_top_losses" is IPython help magic, a SyntaxError in a
# plain Python file — kept as a comment only.
# ??interp.plot_top_losses

# According to the matplotlib docs figsize is the image size in inches.
interp.plot_top_losses(3, nrows=2, figsize=(10, 10))

# Interactive data-cleaning widget: mark images to delete or re-label.
# doc(ImageClassifierCleaner)
cleaner = ImageClassifierCleaner(learn)
cleaner
# Apply the widget's decisions: unlink deletions, move re-labelled files.
for idx in cleaner.delete(): cleaner.fns[idx].unlink()
for idx, cat in cleaner.change(): shutil.move(str(cleaner.fns[idx]), path/cat)
# Fix: the original repeated the delete loop a second time at this point;
# cleaner state is unchanged between the passes, so the second pass would
# unlink already-deleted paths and raise FileNotFoundError. Removed.