CharlesAttend committed on
Commit 6130b66 · verified · 1 Parent(s): 3f4a0d5

Upload preprocessor tests

Files changed (1)
  1. test_preprocessor_config.py +125 -0
test_preprocessor_config.py ADDED
@@ -0,0 +1,125 @@
+ import marimo
+
+ __generated_with = "0.17.0"
+ app = marimo.App()
+
+
+ @app.cell
+ def _():
+     from open_clip import create_model_from_pretrained
+     import torch
+     from transformers import CLIPImageProcessor, AutoImageProcessor
+     from urllib.request import urlopen
+     from PIL import Image
+     return (
+         AutoImageProcessor,
+         CLIPImageProcessor,
+         Image,
+         create_model_from_pretrained,
+         torch,
+         urlopen,
+     )
+
+
+ @app.cell
+ def _(Image, urlopen):
+     # A non-square test image, so resize/crop differences are visible.
+     image = Image.open(urlopen(
+         # 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
+         'https://media.gettyimages.com/id/1309385864/photo/las-vegas-strip-skyline-landscaped-ultra-wide-shot-at-night.jpg?s=1024x1024&w=gi&k=20&c=bBxkjq8FoK2bPKYLtqbJMCgpsvjUm-vI7-yw04cq7AU='
+     ))
+     image.size
+     return (image,)
+
+
+ @app.cell
+ def _():
+     model = "apple/DFN5B-CLIP-ViT-H-14"
+     return (model,)
+
+
+ @app.cell
+ def _(
+     AutoImageProcessor,
+     CLIPImageProcessor,
+     create_model_from_pretrained,
+     model,
+ ):
+     try:
+         hf_pre = AutoImageProcessor.from_pretrained(model)
+     except Exception:
+         print("Auto image processor not found")
+         hf_pre = CLIPImageProcessor()
+     # Candidate fix: square resize to 224x224 with no center crop, using the
+     # standard OpenAI CLIP normalization stats.
+     hf_pre_fix = CLIPImageProcessor(
+         do_center_crop=False,
+         do_normalize=True,
+         do_resize=True,
+         feature_extractor_type="CLIPFeatureExtractor",
+         image_mean=[0.48145466, 0.4578275, 0.40821073],
+         image_std=[0.26862954, 0.26130258, 0.27577711],
+         size={"width": 224, "height": 224},
+         do_convert_rgb=True,
+     )
+     _, preprocess = create_model_from_pretrained(f'hf-hub:{model}')
+     preprocess
+     return hf_pre, hf_pre_fix, preprocess
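+
+
+ @app.cell
+ def _(hf_pre, hf_pre_fix):
+     # Not in the original commit: a hedged sketch that diffs the two HF
+     # configs, so the exact fields changed by the fix are visible at a glance.
+     {k: (hf_pre.to_dict().get(k), v)
+      for k, v in hf_pre_fix.to_dict().items()
+      if hf_pre.to_dict().get(k) != v}
+     return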
+
+
+ @app.cell
+ def _(hf_pre, hf_pre_fix, image, preprocess, torch):
+     hf_res_fix = torch.tensor(hf_pre_fix(images=image)["pixel_values"]).squeeze()
+     hf_res = torch.tensor(hf_pre(images=image)["pixel_values"]).squeeze()
+     op_res = preprocess(image)
+     op_res.shape, hf_res.shape
+     return hf_res, hf_res_fix, op_res
+
+
+ @app.cell
+ def _(hf_res, op_res):
+     # Exact element-wise match: hub processor vs. OpenCLIP transform.
+     (hf_res == op_res).all()
+     return
+
+
+ @app.cell
+ def _(hf_res_fix, op_res):
+     # Exact element-wise match: fixed processor vs. OpenCLIP transform.
+     (hf_res_fix == op_res).all()
+     return
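+
+
+ @app.cell
+ def _(hf_res_fix, op_res, torch):
+     # Not in the original commit: exact float equality is brittle across
+     # resize backends, so a tolerance-based check is a useful companion test.
+     torch.allclose(hf_res_fix, op_res, atol=1e-6)
+     return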
+
+
+ @app.cell
+ def _(hf_res, hf_res_fix, image, op_res):
+     # View the original next to each preprocessed tensor (normalized values
+     # are clipped to [0, 1] by imshow, so colors look distorted).
+     import matplotlib.pyplot as plt
+     fig, axs = plt.subplots(1, 4, figsize=(15, 5))
+     axs[0].imshow(image)
+     axs[0].set_title("Original Image")
+     axs[1].imshow(op_res.permute(1, 2, 0).numpy())
+     axs[1].set_title("OpenCLIP Preprocessed Image")
+     axs[2].imshow(hf_res.permute(1, 2, 0).numpy())
+     axs[2].set_title("HuggingFace Preprocessed Image")
+     axs[3].imshow(hf_res_fix.permute(1, 2, 0).numpy())
+     axs[3].set_title("HuggingFace FIXED Preprocessed Image")
+     plt.show()
+     return
+
+
+ @app.cell
+ def _(hf_pre_fix):
+     # Export the fixed processor config.
+     hf_pre_fix.to_json_file("test.json")
+     return
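+
+
+ @app.cell
+ def _(CLIPImageProcessor):
+     # Not in the original commit: a hedged round-trip check that the exported
+     # config reloads with the same settings (from_json_file comes from
+     # transformers' ImageProcessingMixin).
+     CLIPImageProcessor.from_json_file("test.json").to_dict()
+     return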
+
+
+ @app.cell
+ def _():
+     return
+
+
+ if __name__ == "__main__":
+     app.run()
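
Since the file ends with app.run(), the notebook can be exercised directly, assuming marimo and the model dependencies are installed (these commands are standard marimo usage, not part of the commit):

python test_preprocessor_config.py        # run the notebook as a plain script
marimo edit test_preprocessor_config.py   # open it interactively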