BobShih1008 commited on
Commit
d7546f7
·
1 Parent(s): 87dd499

initialization

Browse files
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.xml filter=lfs diff=lfs merge=lfs -text
37
+ *.xml.encrypted filter=lfs diff=lfs merge=lfs -text
38
+ *.bin.encrypted filter=lfs diff=lfs merge=lfs -text
39
+ *.encrypted filter=lfs diff=lfs merge=lfs -text
40
+ *.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,13 +1,13 @@
1
  ---
2
  title: RAW Denoise
3
- emoji: 🐠
4
- colorFrom: pink
5
- colorTo: yellow
6
  sdk: gradio
7
- sdk_version: 4.37.2
8
  app_file: app.py
9
  pinned: false
10
- license: mit
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: RAW Denoise
3
+ emoji: 📚
4
+ colorFrom: gray
5
+ colorTo: green
6
  sdk: gradio
7
+ sdk_version: 4.31.4
8
  app_file: app.py
9
  pinned: false
10
+ license: unknown
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from customs.utils import rgb2rggb, rggb2rgb, CV72fillCurve, rggb2rgb_np
import torch,os
import numpy as np
from openvino.inference_engine import IECore
from cryptography.fernet import Fernet

# Torch device for tensor preprocessing; model inference itself runs through
# OpenVINO on CPU (see load_ie_model below).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Static app configuration.
#   noise_levels: gain indices used by the synthetic-noise tables
#                 (the 1..5 UI slider maps to positions 0..4 of this list)
#   raw_images:   pre-captured noisy RAW frames (.npy) for the "Real" tab,
#                 one per sensor gain setting
#   weights:      encrypted OpenVINO IR models, one per denoise level
#   SIDD_model_weights: encrypted IR model trained on the SIDD dataset
CONFIG = {
    "noise_levels": [4, 6, 8, 10, 12],
    "raw_images": [
        "data/RAW/noisy/4Card_Gain160_E30.npy",
        "data/RAW/noisy/4Card_Gain180_E30.npy",
        "data/RAW/noisy/4Card_Gain200_E30.npy",
        "data/RAW/noisy/4Card_Gain220_E30.npy",
        "data/RAW/noisy/4Card_Gain240_E30.npy"
    ],
    "weights": [
        "customs/weights/model_ir_0.xml.encrypted",
        "customs/weights/model_ir_1.xml.encrypted",
        "customs/weights/model_ir_2.xml.encrypted",
        "customs/weights/model_ir_3.xml.encrypted",
        "customs/weights/model_ir_4.xml.encrypted"
    ],
    "SIDD_model_weights": "customs/weights/model_ir_SIDD.xml.encrypted"
}
27
+
28
def main():
    """Build the Gradio UI (two tabs: synthetic-noise demo and real-capture
    demo), wire the Inference buttons to the denoise handlers, and launch
    the server.
    """
    with gr.Blocks() as demo:
        # Header / description text.
        create_text("Raw Image Denoiser",size=10)
        create_text("Data Detail : Collect images of imx678 image sensor and analyze the noise composition and distribution",size=5)
        create_text("Model Detail : ",size=5)
        create_text("Synthesis Data : We have the technology to analyze and apply noise",size=3)
        create_text("Our Denoiser : Our model architecture is trained on synthesize noise with SIDD ground truth",size=3)
        create_text("SIDD Denoiser : Our model architecture is trained on SIDD dataset",size=3)
        create_text("Community : Any questions please contact us (tim.liu@liteon.com)",size=3)
        # Tab 1: user uploads an RGB image; synthetic sensor noise is added,
        # then both denoisers run on it.
        with gr.Tab("Synthesis"):
            with gr.Column():
                with gr.Row():
                    image1 = gr.Image(label="Your Input Image")
                    with gr.Column():
                        noise_level1 = create_slider("noise level")
                        denoise_level1 = create_slider("denoise level")
                        use_synthesis = gr.Checkbox(label="Use synthesis", value=True)
                        image_button1 = gr.Button("Inference")
                        # create_text("SIDD Denoiser : Our model architecture is trained to SIDD dataset")
                image_input1 = [image1, noise_level1, denoise_level1, use_synthesis]

                with gr.Row():
                    SynthesisNoise1 = gr.Image(label="Synthesis noise")
                    OurDenoise1 = gr.Image(label="Our denoiser result")
                with gr.Row():
                    SIDDDenoise1 = gr.Image(label="SIDD denoiser result")
                    examples1 = gr.Examples(examples=[["data/RGB/4Card.png"],["data/RGB/Color.png"],["data/RGB/Focus.png"]],inputs=image_input1)

                image_output1 = [SynthesisNoise1, OurDenoise1, SIDDDenoise1]

        # Tab 2: pre-captured noisy RAW frames; user only picks noise and
        # denoise levels.
        with gr.Tab("Real"):
            with gr.Column():
                with gr.Row():
                    with gr.Column():
                        noise_level2 = create_slider("noise level")
                        denoise_level2 = create_slider("denoise level")
                        image_button2 = gr.Button("Inference")
                        image_input2 = [noise_level2, denoise_level2]
                    RealRow = gr.Image(label="Real noise")
                with gr.Row():
                    OurDenoise2 = gr.Image(label="Our denoiser result")
                    SIDDDenoise2 = gr.Image(label="SIDD denoiser result")

                image_output2 = [RealRow, OurDenoise2, SIDDDenoise2]

        # Wire buttons to the inference callbacks defined below.
        image_button1.click(denoise_synthesis, inputs=image_input1, outputs=image_output1)
        image_button2.click(denoise_real, inputs=image_input2, outputs=image_output2)
    demo.launch()
76
+
77
def decrypt_model(encrypted_file_path, decrypted_file_path):
    """Decrypt a Fernet-encrypted model file and write the plaintext to disk.

    The symmetric key is read from ./IRModelKey.txt when that file exists,
    otherwise from the IRModelKey environment variable.

    Args:
        encrypted_file_path: path of the encrypted input file.
        decrypted_file_path: path the decrypted bytes are written to.

    Raises:
        RuntimeError: if no key is available from either source.
    """
    # If the key file does not exist, fall back to the environment variable.
    if os.path.exists("IRModelKey.txt"):
        with open("IRModelKey.txt", "rb") as file:
            key = file.read()
    else:
        key = os.getenv("IRModelKey")
        if key is None:
            # Fail with a clear message instead of letting Fernet(None)
            # raise a cryptic TypeError deep inside the cryptography package.
            raise RuntimeError(
                "No decryption key found: create IRModelKey.txt or set the "
                "IRModelKey environment variable")
    cipher_suite = Fernet(key)
    with open(encrypted_file_path, 'rb') as file:
        encrypted_data = file.read()
    decrypted_data = cipher_suite.decrypt(encrypted_data)
    with open(decrypted_file_path, 'wb') as file:
        file.write(decrypted_data)
94
+
95
class IEModel:
    """Thin wrapper around an OpenVINO ExecutableNetwork.

    Handles HWC->NCHW preprocessing and offers both synchronous (`forward`)
    and asynchronous (`forward_async` / `grab_all_async`) inference.
    """

    def __init__(self, exec_net, inputs_info, input_key, output_key, switch_rb=True):
        """
        Args:
            exec_net: loaded OpenVINO ExecutableNetwork.
            inputs_info: the network's input_info mapping (for shape queries).
            input_key: name of the input blob fed by this wrapper.
            output_key: name of the output blob returned by this wrapper.
            switch_rb: stored flag; not consumed inside this class
                       (presumably red/blue channel swap — callers decide).
        """
        self.net = exec_net
        self.inputs_info = inputs_info
        self.input_key = input_key
        self.output_key = output_key
        self.reqs_ids = []  # ids of in-flight async requests, in submit order
        self.switch_rb = switch_rb

    def _preprocess(self, img):
        """Convert an HWC image to NCHW with a leading batch dim of 1.

        (The original also queried get_input_shape() here but discarded the
        result — that dead call is removed.)
        """
        return np.expand_dims(img.transpose(2, 0, 1), axis=0)

    def forward(self, img):
        """Performs a synchronous forward pass; returns a copy of the output blob."""
        res = self.net.infer(inputs={self.input_key: self._preprocess(img)})
        return np.copy(res[self.output_key])

    def forward_async(self, img):
        """Starts an async infer request; collect results with grab_all_async()."""
        # `req_id` instead of `id` — avoid shadowing the builtin.
        req_id = len(self.reqs_ids)
        self.net.start_async(request_id=req_id,
                             inputs={self.input_key: self._preprocess(img)})
        self.reqs_ids.append(req_id)

    def grab_all_async(self):
        """Waits for every pending async request and returns outputs in submit order."""
        outputs = []
        for req_id in self.reqs_ids:
            self.net.requests[req_id].wait(-1)
            res = self.net.requests[req_id].output_blobs[self.output_key].buffer
            outputs.append(np.copy(res))
        self.reqs_ids = []
        return outputs

    def get_input_shape(self):
        """Returns an input shape of the wrapped IE model"""
        return self.inputs_info[self.input_key].input_data.shape
133
+
134
def load_ie_model(model_xml, device, plugin_dir, cpu_extension='', num_reqs=1, **kwargs):
    """Loads a model in the Inference Engine format.

    Args:
        model_xml: path to the IR .xml topology; the .bin weights are
            expected alongside it with the same stem.
        device: OpenVINO device string, e.g. "CPU".
        plugin_dir: unused; kept for signature compatibility with callers.
        cpu_extension: optional CPU extension library to register.
        num_reqs: number of infer requests for async use.
        **kwargs: forwarded to the IEModel wrapper (e.g. switch_rb).

    Returns:
        IEModel wrapping the loaded executable network.
    """
    # BUGFIX: the original created three separate IECore() instances, so the
    # CPU extension was registered on a throwaway core and never applied to
    # the core that actually read and loaded the network. Use one core.
    ie = IECore()
    if cpu_extension and 'CPU' in device:
        ie.add_extension(cpu_extension, 'CPU')
    # Read IR
    net = ie.read_network(model_xml, os.path.splitext(model_xml)[0] + ".bin")

    assert len(net.input_info) == 1 or len(net.input_info) == 2, \
        "Supports topologies with only 1 or 2 inputs"
    assert len(net.outputs) == 1 or len(net.outputs) == 4 or len(net.outputs) == 5, \
        "Supports topologies with only 1, 4 or 5 outputs"

    input_blob = next(iter(net.input_info))
    out_blob = next(iter(net.outputs))
    net.batch_size = 1

    # Loading model to the plugin
    exec_net = ie.load_network(network=net, device_name=device, num_requests=num_reqs)
    model = IEModel(exec_net, net.input_info, input_blob, out_blob, **kwargs)
    return model
154
+
155
# Decrypt every IR model (topology .xml + weights .bin) next to its encrypted
# source, then load all of them with the OpenVINO CPU plugin.
# This replaces twelve copy-pasted decrypt_model(...) calls with one loop.
for _weight in CONFIG["weights"] + [CONFIG["SIDD_model_weights"]]:
    _base = _weight.split('.')[0]  # e.g. "customs/weights/model_ir_0"
    decrypt_model(_base + ".xml.encrypted", _base + "_decrypted.xml")
    decrypt_model(_base + ".bin.encrypted", _base + "_decrypted.bin")

# One denoiser per denoise level (slider 1..5 -> index 0..4), plus the
# SIDD-trained reference model.
denoiseModelList = [load_ie_model(weight.split('.')[0] + "_decrypted.xml", "CPU", None, "") for weight in CONFIG["weights"]]
SIDD_model = load_ie_model(CONFIG["SIDD_model_weights"].split('.')[0] + "_decrypted.xml", "CPU", None, "")
169
+
170
def denoise_synthesis(image, noise_level=1, denoise_level=1, use_synthesis=True):
    """Gradio handler for the "Synthesis" tab.

    Resizes the uploaded RGB image to 1080x1920, converts it to a 4-channel
    RGGB tensor, optionally injects synthetic sensor noise for the chosen
    gain level, then runs both denoisers on the RGGB frame.

    Args:
        image: H x W x 3 RGB array from the Gradio Image widget.
            NOTE(review): torch.tensor(image) is fed straight into bilinear
            interpolate — this assumes a float-compatible dtype; a uint8
            input may fail there. TODO confirm what Gradio delivers.
        noise_level: 1-based slider value indexing CONFIG["noise_levels"].
        denoise_level: 1-based slider value indexing denoiseModelList.
        use_synthesis: when True, add synthetic noise before denoising.

    Returns:
        Tuple of (noisy RGB preview, our denoiser result, SIDD denoiser
        result); the last two are converted RGGB->RGB and clipped to [0, 1].
    """
    # # Assuming image is a numpy array
    # rgb = np.transpose(image, (2, 0, 1))[np.newaxis, :]
    # # rgb is not 1080 x 1920, resize it , test in 360 x 640
    # rgb = cv2.resize(rgb[0].transpose(1,2,0), (1920, 1080)).transpose(2,0,1)[np.newaxis, :]
    # rggb = rgb2rggb_np(np.transpose(rgb.squeeze(0), (1, 2, 0))) / 255 # Normalize to [0, 1]
    # if use_synthesis:
    #     noiseImage = CV72fillCurve_np(rggb, CONFIG["noise_levels"][noise_level-1], CONFIG["noise_levels"][noise_level-1]+1)
    #     rgb = rggb2rgb_np(noiseImage)
    #     rgb = np.clip(rgb, 0, 1) # In-place clipping

    # torch function speed more than numpy
    rgb = torch.tensor(image).permute(2, 0, 1).unsqueeze(0)
    # rgb is not 1080 x 1920, resize it , test in 360 x 640
    rgb = torch.nn.functional.interpolate(rgb, size=(1080, 1920), mode='bilinear', align_corners=False)
    rggb = rgb2rggb(rgb.squeeze(0).permute(1, 2, 0)) / 255 # Normalize to [0, 1]
    if use_synthesis:
        # Pick the gain index for the selected noise level; the half-open
        # range [level, level+1) makes the random draw deterministic here.
        rggb = CV72fillCurve(rggb, CONFIG["noise_levels"][noise_level-1], CONFIG["noise_levels"][noise_level-1]+1)
    rgb = rggb2rgb(rggb)
    rgb = rgb.clamp_(0, 1).cpu().numpy() # In-place clipping
    # The (noisy) RGGB frame, HWC, is what the IR models consume.
    noiseImage = rggb.numpy()
    output = denoiseModelList[denoise_level-1].forward(noiseImage)
    SIDDOutput = SIDD_model.forward(noiseImage)
    # Model outputs come back NCHW; squeeze the batch dim and go CHW -> HWC.
    return rgb, RGGB2RGBNumpy(output.squeeze().transpose(1, 2, 0)), RGGB2RGBNumpy(SIDDOutput.squeeze().transpose(1, 2, 0))
194
+
195
def denoise_real(noise_level=1, denoise_level=1):
    """Gradio handler for the "Real" tab.

    Loads the pre-captured noisy RAW frame for the chosen gain, normalizes
    it from 16-bit range to [0, 1], and runs both denoisers on it.

    Returns:
        (noisy preview, our denoiser result, SIDD denoiser result), each
        converted RGGB -> RGB and clipped to [0, 1].
    """
    raw_path = CONFIG["raw_images"][noise_level-1]
    noisy = np.load(raw_path).astype(np.float32) / 65535.0
    ours = denoiseModelList[denoise_level-1].forward(noisy)
    sidd = SIDD_model.forward(noisy)

    def to_rgb(chw):
        # Model outputs are NCHW; drop the batch dim and go CHW -> HWC.
        return RGGB2RGBNumpy(chw.squeeze().transpose(1, 2, 0))

    return RGGB2RGBNumpy(noisy), to_rgb(ours), to_rgb(sidd)
201
+
202
def create_slider(label):
    """Build the standard 1-5 integer level slider (default 1) used by both tabs."""
    slider = gr.Slider(minimum=1, maximum=5, value=1, step=1,
                       interactive=True, label=label)
    return slider
204
+
205
def create_text(text, size=3, color="black"):
    """Render *text* as a Markdown <font> span with the given size and color."""
    gr.Markdown(f"<font size={size} color={color}>{text}</font>")
207
+
208
def RGGB2RGBNumpy(numpyInput):
    """Convert an H x W x 4 RGGB numpy image to RGB, clipped to [0, 1]."""
    rgb = rggb2rgb_np(numpyInput)
    return np.clip(rgb, 0, 1)
214
+
215
# Script entry point: build the Gradio UI and launch the server.
if __name__ == "__main__":
    main()
customs/gaussian_table_AMBA_4k0111_update.json ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "0": {
3
+ "0": {
4
+ "mu": 0.04884533454585538,
5
+ "std": 0.00036523623507143493,
6
+ "row_std": 2.1192263945304984e-05
7
+ },
8
+ "1": {
9
+ "mu": 0.04885432883089133,
10
+ "std": 0.0003590921086861262,
11
+ "row_std": 2.0632113591805363e-05
12
+ },
13
+ "2": {
14
+ "mu": 0.04883828976450504,
15
+ "std": 0.00035916750637214866,
16
+ "row_std": 2.1054889173977102e-05
17
+ },
18
+ "3": {
19
+ "mu": 0.04886586050908968,
20
+ "std": 0.00036625797807880045,
21
+ "row_std": 2.080488683538461e-05
22
+ }
23
+ },
24
+ "1": {
25
+ "0": {
26
+ "mu": 0.048910118270361326,
27
+ "std": 0.0007679587162312924,
28
+ "row_std": 5.647232988739475e-05
29
+ },
30
+ "1": {
31
+ "mu": 0.04897846737024978,
32
+ "std": 0.0007657190460110321,
33
+ "row_std": 5.5061890891124056e-05
34
+ },
35
+ "2": {
36
+ "mu": 0.048902397863624714,
37
+ "std": 0.0007671995968468128,
38
+ "row_std": 5.622613422385847e-05
39
+ },
40
+ "3": {
41
+ "mu": 0.04899352096805801,
42
+ "std": 0.0007698302663261498,
43
+ "row_std": 5.437209611421559e-05
44
+ }
45
+ },
46
+ "2": {
47
+ "0": {
48
+ "mu": 0.048857065013604366,
49
+ "std": 0.0012123489716326134,
50
+ "row_std": 7.08703314107849e-05
51
+ },
52
+ "1": {
53
+ "mu": 0.04893770161594236,
54
+ "std": 0.0011744860955057528,
55
+ "row_std": 6.74400964749618e-05
56
+ },
57
+ "2": {
58
+ "mu": 0.04883367026277906,
59
+ "std": 0.0011848557755693141,
60
+ "row_std": 7.046526265271678e-05
61
+ },
62
+ "3": {
63
+ "mu": 0.04899051627813202,
64
+ "std": 0.0012225518384126255,
65
+ "row_std": 6.699616703751188e-05
66
+ }
67
+ },
68
+ "3": {
69
+ "0": {
70
+ "mu": 0.04957285036422768,
71
+ "std": 0.0039884736321273515,
72
+ "row_std": 0.0001806924709740455
73
+ },
74
+ "1": {
75
+ "mu": 0.048884518345166494,
76
+ "std": 0.003988940591138176,
77
+ "row_std": 0.00019624591450196352
78
+ },
79
+ "2": {
80
+ "mu": 0.049503986978813364,
81
+ "std": 0.004022970723134317,
82
+ "row_std": 0.0001866611784478792
83
+ },
84
+ "3": {
85
+ "mu": 0.04904338346410105,
86
+ "std": 0.004021104097136626,
87
+ "row_std": 0.00020614522450051762
88
+ }
89
+ },
90
+ "4": {
91
+ "0": {
92
+ "mu": 0.051179580803712744,
93
+ "std": 0.01516965579714122,
94
+ "row_std": 0.0006848126111928647
95
+ },
96
+ "1": {
97
+ "mu": 0.048555232287003126,
98
+ "std": 0.015131666031611908,
99
+ "row_std": 0.0008252242178262105
100
+ },
101
+ "2": {
102
+ "mu": 0.05092065717846968,
103
+ "std": 0.015306564497661098,
104
+ "row_std": 0.0006824809953618861
105
+ },
106
+ "3": {
107
+ "mu": 0.04921186702957536,
108
+ "std": 0.015234968339770111,
109
+ "row_std": 0.0008159484522919293
110
+ }
111
+ },
112
+ "5": {
113
+ "0": {
114
+ "mu": 0.05194902646703862,
115
+ "std": 0.02345391398272158,
116
+ "row_std": 0.0010309458194859393
117
+ },
118
+ "1": {
119
+ "mu": 0.050978013292755774,
120
+ "std": 0.023448010228184248,
121
+ "row_std": 0.0010183040421496512
122
+ },
123
+ "2": {
124
+ "mu": 0.05156257428110727,
125
+ "std": 0.02358192047699801,
126
+ "row_std": 0.0010264660962841189
127
+ },
128
+ "3": {
129
+ "mu": 0.052059536793260745,
130
+ "std": 0.02374497801210408,
131
+ "row_std": 0.0010426603459159902
132
+ }
133
+ },
134
+ "6": {
135
+ "0": {
136
+ "mu": 0.05271847213036449,
137
+ "std": 0.030823781597773586,
138
+ "row_std": 0.001320962575344441
139
+ },
140
+ "1": {
141
+ "mu": 0.05340079429850842,
142
+ "std": 0.030849861367983702,
143
+ "row_std": 0.0013030114220171606
144
+ },
145
+ "2": {
146
+ "mu": 0.05220449138374486,
147
+ "std": 0.0310291804422537,
148
+ "row_std": 0.0013147798525491635
149
+ },
150
+ "3": {
151
+ "mu": 0.05490720655694614,
152
+ "std": 0.031227799293258977,
153
+ "row_std": 0.0013365933725464601
154
+ }
155
+ },
156
+ "7": {
157
+ "0": {
158
+ "mu": 0.053487917793690366,
159
+ "std": 0.040509465187225646,
160
+ "row_std": 0.001692564335078927
161
+ },
162
+ "1": {
163
+ "mu": 0.05582357530426107,
164
+ "std": 0.04058826046057689,
165
+ "row_std": 0.0016673200690856791
166
+ },
167
+ "2": {
168
+ "mu": 0.052846408486382446,
169
+ "std": 0.040828313362224755,
170
+ "row_std": 0.001684075165197392
171
+ },
172
+ "3": {
173
+ "mu": 0.05775487632063153,
174
+ "std": 0.0410687029570197,
175
+ "row_std": 0.001713388113907482
176
+ }
177
+ },
178
+ "8": {
179
+ "0": {
180
+ "mu": 0.05425736345701624,
181
+ "std": 0.06282588232727848,
182
+ "row_std": 0.002659041836231682
183
+ },
184
+ "1": {
185
+ "mu": 0.058246356310013715,
186
+ "std": 0.06315593086974453,
187
+ "row_std": 0.002395979118305699
188
+ },
189
+ "2": {
190
+ "mu": 0.05348832558902004,
191
+ "std": 0.06334979243468684,
192
+ "row_std": 0.0025916868606935843
193
+ },
194
+ "3": {
195
+ "mu": 0.060602546084316915,
196
+ "std": 0.0637482828951418,
197
+ "row_std": 0.002443527678588156
198
+ }
199
+ },
200
+ "9": {
201
+ "0": {
202
+ "mu": 0.06880619950180135,
203
+ "std": 0.06996771502462133,
204
+ "row_std": 0.0027787823319460475
205
+ },
206
+ "1": {
207
+ "mu": 0.07468870728144456,
208
+ "std": 0.07025785434174985,
209
+ "row_std": 0.002729986426163404
210
+ },
211
+ "2": {
212
+ "mu": 0.06785894824429257,
213
+ "std": 0.07068767675770854,
214
+ "row_std": 0.002762983767609609
215
+ },
216
+ "3": {
217
+ "mu": 0.07821418040175274,
218
+ "std": 0.0710313718425912,
219
+ "row_std": 0.0028155849988717005
220
+ }
221
+ },
222
+ "10": {
223
+ "0": {
224
+ "mu": 0.08335503554658646,
225
+ "std": 0.09195350372662747,
226
+ "row_std": 0.003560485329247844
227
+ },
228
+ "1": {
229
+ "mu": 0.09113105825287539,
230
+ "std": 0.09243620440977662,
231
+ "row_std": 0.0034932626681255497
232
+ },
233
+ "2": {
234
+ "mu": 0.08222957089956512,
235
+ "std": 0.09301111329325708,
236
+ "row_std": 0.003539050538273192
237
+ },
238
+ "3": {
239
+ "mu": 0.09582581471918858,
240
+ "std": 0.09341568656305245,
241
+ "row_std": 0.003609317515597133
242
+ }
243
+ },
244
+ "11": {
245
+ "0": {
246
+ "mu": 0.0979038715913716,
247
+ "std": 0.12084783452807416,
248
+ "row_std": 0.004562090248684959
249
+ },
250
+ "1": {
251
+ "mu": 0.10757340922430622,
252
+ "std": 0.12161561103366324,
253
+ "row_std": 0.004469943129229767
254
+ },
255
+ "2": {
256
+ "mu": 0.09660019355483765,
257
+ "std": 0.12238437579019316,
258
+ "row_std": 0.004533098912588848
259
+ },
260
+ "3": {
261
+ "mu": 0.1134374490366244,
262
+ "std": 0.12285403293892105,
263
+ "row_std": 0.0046268086147698215
264
+ }
265
+ },
266
+ "12": {
267
+ "0": {
268
+ "mu": 0.11245270763615671,
269
+ "std": 0.21799335234166645,
270
+ "row_std": 0.007758689083129044
271
+ },
272
+ "1": {
273
+ "mu": 0.12401576019573704,
274
+ "std": 0.21989850331007596,
275
+ "row_std": 0.007001712632885403
276
+ },
277
+ "2": {
278
+ "mu": 0.11097081621011019,
279
+ "std": 0.22041589626618188,
280
+ "row_std": 0.007715688230680361
281
+ },
282
+ "3": {
283
+ "mu": 0.1310490833540602,
284
+ "std": 0.2227708559950361,
285
+ "row_std": 0.007402277517094011
286
+ }
287
+ }
288
+ }
customs/k_table_AMBA_4k0111_update.json ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "0": {
3
+ "0": {
4
+ "K": 9.465562984038772e-05
5
+ },
6
+ "1": {
7
+ "K": 9.601915868378001e-05
8
+ },
9
+ "2": {
10
+ "K": 9.599987238132664e-05
11
+ },
12
+ "3": {
13
+ "K": 9.497633660608725e-05
14
+ }
15
+ },
16
+ "1": {
17
+ "0": {
18
+ "K": 0.00037416072090588296
19
+ },
20
+ "1": {
21
+ "K": 0.0003771304526158978
22
+ },
23
+ "2": {
24
+ "K": 0.0003777319715400697
25
+ },
26
+ "3": {
27
+ "K": 0.00037443032854174235
28
+ }
29
+ },
30
+ "2": {
31
+ "0": {
32
+ "K": 0.001413779109788249
33
+ },
34
+ "1": {
35
+ "K": 0.0014158302616208089
36
+ },
37
+ "2": {
38
+ "K": 0.0014192205107157842
39
+ },
40
+ "3": {
41
+ "K": 0.0014145217849946378
42
+ }
43
+ },
44
+ "3": {
45
+ "0": {
46
+ "K": 0.005717976821537382
47
+ },
48
+ "1": {
49
+ "K": 0.005728937593309858
50
+ },
51
+ "2": {
52
+ "K": 0.005743112226402247
53
+ },
54
+ "3": {
55
+ "K": 0.0057202041041990665
56
+ }
57
+ },
58
+ "4": {
59
+ "0": {
60
+ "K": 0.02283841937703782
61
+ },
62
+ "1": {
63
+ "K": 0.02313202101123309
64
+ },
65
+ "2": {
66
+ "K": 0.023103205170867457
67
+ },
68
+ "3": {
69
+ "K": 0.022942343069204247
70
+ }
71
+ },
72
+ "5": {
73
+ "0": {
74
+ "K": 0.03178069184342341
75
+ },
76
+ "1": {
77
+ "K": 0.03132001430134658
78
+ },
79
+ "2": {
80
+ "K": 0.03132335616453206
81
+ },
82
+ "3": {
83
+ "K": 0.03205151971156825
84
+ }
85
+ },
86
+ "6": {
87
+ "0": {
88
+ "K": 0.044795230279835715
89
+ },
90
+ "1": {
91
+ "K": 0.0440557750241913
92
+ },
93
+ "2": {
94
+ "K": 0.044057320482160635
95
+ },
96
+ "3": {
97
+ "K": 0.045203759517728224
98
+ }
99
+ },
100
+ "7": {
101
+ "0": {
102
+ "K": 0.06313936353902103
103
+ },
104
+ "1": {
105
+ "K": 0.06197032013803114
106
+ },
107
+ "2": {
108
+ "K": 0.06196805597305982
109
+ },
110
+ "3": {
111
+ "K": 0.06375297935714079
112
+ }
113
+ },
114
+ "8": {
115
+ "0": {
116
+ "K": 0.09421036190877768
117
+ },
118
+ "1": {
119
+ "K": 0.09170346908984744
120
+ },
121
+ "2": {
122
+ "K": 0.09137848357396919
123
+ },
124
+ "3": {
125
+ "K": 0.09551643673004001
126
+ }
127
+ },
128
+ "9": {
129
+ "0": {
130
+ "K": 0.12544029084557606
131
+ },
132
+ "1": {
133
+ "K": 0.12261554356458139
134
+ },
135
+ "2": {
136
+ "K": 0.12259350310068033
137
+ },
138
+ "3": {
139
+ "K": 0.12680966186589437
140
+ }
141
+ },
142
+ "10": {
143
+ "0": {
144
+ "K": 0.17680945218188984
145
+ },
146
+ "1": {
147
+ "K": 0.17247510648543582
148
+ },
149
+ "2": {
150
+ "K": 0.17243175433586605
151
+ },
152
+ "3": {
153
+ "K": 0.17884560579638847
154
+ }
155
+ },
156
+ "11": {
157
+ "0": {
158
+ "K": 0.24921484293547064
159
+ },
160
+ "1": {
161
+ "K": 0.24260922793605236
162
+ },
163
+ "2": {
164
+ "K": 0.24253087766752507
165
+ },
166
+ "3": {
167
+ "K": 0.2522343348451105
168
+ }
169
+ },
170
+ "12": {
171
+ "0": {
172
+ "K": 0.333474050511197
173
+ },
174
+ "1": {
175
+ "K": 0.31967327985902605
176
+ },
177
+ "2": {
178
+ "K": 0.32024512603330063
179
+ },
180
+ "3": {
181
+ "K": 0.3386428894141024
182
+ }
183
+ }
184
+ }
customs/utils.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import numpy as np
import json
import random
# Preferred torch device string; note the helpers below create tensors on
# the caller's device (or CPU by default) — `device` itself is not consumed
# in this module's visible code.
device = "cuda" if torch.cuda.is_available() else "cpu"
6
+
7
def pack_raw(raw):
    """Pack a Bayer RAW frame into a half-resolution 4-channel image.

    Subtracts the 512 black level, normalizes by the remaining 14-bit range
    (16383 - 512), and stacks the four Bayer phases — (even,even),
    (even,odd), (odd,odd), (odd,even) — along the channel axis.

    Args:
        raw: object exposing `raw_image_visible` as a 2-D integer array
            (e.g. a rawpy RawPy instance).

    Returns:
        (H/2, W/2, 4) float32-compatible array in [0, 1].
    """
    plane = raw.raw_image_visible.astype(np.float32)
    # Black-level subtraction, clamped at zero, then normalize.
    plane = np.maximum(plane - 512, 0) / (16383 - 512)

    plane = plane[:, :, np.newaxis]
    rows, cols = plane.shape[0], plane.shape[1]

    phases = (
        plane[0:rows:2, 0:cols:2, :],  # even row, even col
        plane[0:rows:2, 1:cols:2, :],  # even row, odd col
        plane[1:rows:2, 1:cols:2, :],  # odd row, odd col
        plane[1:rows:2, 0:cols:2, :],  # odd row, even col
    )
    return np.concatenate(phases, axis=2)
22
+
23
def CV72fillCurve(img, first_index, end_index):
    """Add synthetic sensor noise (Poisson shot noise + Gaussian row noise)
    to an RGGB tensor, using per-gain/per-channel parameters measured from
    the imx678 sensor and stored in the JSON tables under customs/.

    Args:
        img: (H, W, 4) torch tensor in [0, 1]; not modified (cloned first).
        first_index: inclusive lower bound of the gain-table index to sample.
        end_index: exclusive upper bound; a single gain is drawn uniformly
            from [first_index, end_index).

    Returns:
        New (H, W, 4) tensor with noise added, clamped to [0, 1].
    """
    # Per-gain K factors (shot-noise scale) and Gaussian std tables.
    with open('customs/k_table_AMBA_4k0111_update.json') as f:
        K_tableCV72_load_json = json.load(f)
    with open('customs/gaussian_table_AMBA_4k0111_update.json') as f:
        GS_tableCV72_load_json = json.load(f)
    img = torch.clone(img)
    # 200/4096 is presumably the black-level pedestal removed before the
    # Poisson model — TODO confirm against the capture pipeline.
    imgPoisson = img - (200/4096)
    imgPoisson = torch.clamp(imgPoisson, 0, 1)
    gain_index = str(random.randrange(int(first_index), int(end_index)))
    h, w, c = img.shape

    for i in range(c):
        syn_noise_R = torch.zeros([h, w], device=img.device)
        channel = str(i)
        pu_K = K_tableCV72_load_json[gain_index][channel]['K']
        pu_std = GS_tableCV72_load_json[gain_index][channel]['std']
        pu_row_std = GS_tableCV72_load_json[gain_index][channel]['row_std']# std of row noise
        # One offset per row: models row-correlated readout noise.
        # NOTE(review): created on CPU while syn_noise_R lives on img.device —
        # likely breaks for CUDA inputs; confirm before running on GPU.
        generated_row = torch.normal(0, pu_row_std, size=(h, 1))
        for j in range(h):
            # NOTE(review): torch.normal(float, Tensor, size=...) mixes the
            # scalar and tensor overloads — verify this signature is accepted
            # by the installed torch version.
            gen_row_noisy = torch.normal(float(generated_row[j]), torch.tensor(pu_std), size=(1, w))
            syn_noise_R[j] = gen_row_noisy
        # Shot noise: replace signal with a Poisson draw scaled by K, added
        # as a residual so the pedestal region stays untouched.
        img[:, :, i] = img[:, :, i] + (imgPoisson[:, :, i] - torch.poisson(imgPoisson[:, :, i] / pu_K) * pu_K) ## Don't add 200
        img[:, :, i] += syn_noise_R
        img[:, :, i] = torch.clamp(img[:, :, i], 0, 1)
    return img
48
+
49
def CV72fillCurve_np(img, first_index, end_index):
    """NumPy twin of CV72fillCurve: add synthetic Poisson shot noise and
    Gaussian row noise to an RGGB array using the per-gain JSON tables.

    Args:
        img: (H, W, 4) array in [0, 1]; not modified (copied first).
        first_index: inclusive lower bound of the gain-table index to sample.
        end_index: exclusive upper bound; one gain is drawn uniformly from
            [first_index, end_index).

    Returns:
        New (H, W, 4) array with noise added, clipped to [0, 1].
    """
    with open('customs/k_table_AMBA_4k0111_update.json') as f:
        K_tableCV72_load_json = json.load(f)
    with open('customs/gaussian_table_AMBA_4k0111_update.json') as f:
        GS_tableCV72_load_json = json.load(f)
    img = np.copy(img)
    # 200/4096 is presumably the black-level pedestal removed before the
    # Poisson model — TODO confirm against the capture pipeline.
    imgPoisson = img - (200/4096)
    imgPoisson = np.clip(imgPoisson, 0, 1)
    gain_index = str(random.randrange(int(first_index), int(end_index)))
    h, w, c = img.shape

    for i in range(c):
        syn_noise_R = np.zeros([h, w])
        channel = str(i)
        pu_K = K_tableCV72_load_json[gain_index][channel]['K']
        pu_std = GS_tableCV72_load_json[gain_index][channel]['std']
        pu_row_std = GS_tableCV72_load_json[gain_index][channel]['row_std']# std of row noise
        # One offset per row: models row-correlated readout noise.
        generated_row = np.random.normal(0, pu_row_std, size=(h, 1))
        for j in range(h):
            gen_row_noisy = np.random.normal(float(generated_row[j]), pu_std, size=(1, w))
            syn_noise_R[j] = gen_row_noisy
        # Shot noise: replace signal with a Poisson draw scaled by K, added
        # as a residual so the pedestal region stays untouched.
        img[:, :, i] = img[:, :, i] + (imgPoisson[:, :, i] - np.random.poisson(imgPoisson[:, :, i] / pu_K) * pu_K) ## Don't add 200
        img[:, :, i] += syn_noise_R
        img[:, :, i] = np.clip(img[:, :, i], 0, 1)
    return img
74
+
75
def rgb2rggb(img):
    """Expand an (H, W, 3) RGB tensor to (H, W, 4) RGGB.

    The green channel is duplicated into both G slots; no Bayer subsampling
    is performed.
    """
    r = img[:, :, 0]
    g = img[:, :, 1]
    b = img[:, :, 2]
    return torch.stack([r, g, g, b], dim=2)
82
+
83
def rggb2rgb(img):
    """Collapse an (H, W, 4) RGGB tensor to (H, W, 3) RGB.

    The two green channels are averaged; red and blue pass through.
    """
    channels = [
        img[:, :, 0],                          # R
        img[:, :, 1] / 2 + img[:, :, 2] / 2,   # mean of G1, G2
        img[:, :, 3],                          # B
    ]
    return torch.stack(channels, dim=2)
89
+
90
def rggb2rgb_np(img):
    """NumPy twin of rggb2rgb: (H, W, 4) RGGB -> (H, W, 3) RGB.

    The two green channels are averaged; red and blue pass through.
    """
    channels = [
        img[:, :, 0],                          # R
        img[:, :, 1] / 2 + img[:, :, 2] / 2,   # mean of G1, G2
        img[:, :, 3],                          # B
    ]
    return np.stack(channels, axis=2)
96
+
97
def rgb2rggb_np(img):
    """NumPy twin of rgb2rggb: (H, W, 3) RGB -> (H, W, 4) RGGB.

    The green channel is duplicated into both G slots; no Bayer subsampling
    is performed.
    """
    r = img[:, :, 0]
    g = img[:, :, 1]
    b = img[:, :, 2]
    return np.stack([r, g, g, b], axis=2)
customs/weights/model_ir_0.bin.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9cbe0efce62523da961e1a221a64e302ff458be834ed1741a50066337bc29177
3
+ size 1459684
customs/weights/model_ir_0.xml.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17cf2983bd015272e0f34876c9c8486c6ee2a99af1a0530309f7ab39566a12d5
3
+ size 171724
customs/weights/model_ir_1.bin.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad4c80d19fe4b0cb6ef2915ab7bbdb2dabd40b7b4cf38d2d5d69114f011057cb
3
+ size 1459684
customs/weights/model_ir_1.xml.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:533129654c5df08496311ffdbf9387eb9b83afd08108a607bf7eca1b61b70773
3
+ size 171724
customs/weights/model_ir_2.bin.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8cc0142d26d02a2e885a802ea92f3b617861a30c9e309f636b04c7cad0fb033
3
+ size 1459684
customs/weights/model_ir_2.xml.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57c4ebb69702107caa23a24710e33d8c6e26381b02a69b7375fb67a98d1e77d4
3
+ size 171724
customs/weights/model_ir_3.bin.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7335eccfb89a5cd560dbc905a4b7b280d6060fd381b60fa7b47b9c36380d5473
3
+ size 1459684
customs/weights/model_ir_3.xml.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bfe8cffa0c854e7c533de4b09bbc9ce707e009c6a0a5b9571bc4372f2adb02a
3
+ size 171724
customs/weights/model_ir_4.bin.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:323115dcfeb2b144d3ed904990caef4d204e5fa4f0b9a2100429555aee492144
3
+ size 1459684
customs/weights/model_ir_4.xml.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac50ba380a99b4022e951a6dc4e344baae5ee76936c349132ea51e46c95ba7f6
3
+ size 171724
customs/weights/model_ir_SIDD.bin.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e53e3e0403e239258e724ee3e97766b39c463f313ac56acdf34cb13f6392d895
3
+ size 1459684
customs/weights/model_ir_SIDD.xml.encrypted ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54454cd30998abcf2ae63d9227e62744aa111063ddca5c4b7c950382b5c9ab31
3
+ size 171724
data/RGB/4Card.png ADDED

Git LFS Details

  • SHA256: c607eb5873ff3f3f7f32033ca199e14e1ef61d8ee7306f6078ef1666a9c34d86
  • Pointer size: 132 Bytes
  • Size of remote file: 1.43 MB
data/RGB/Color.png ADDED

Git LFS Details

  • SHA256: df30c3a9379cb84ecc658b3ef5166aa77cdb4108b56399ed76c2316bf81cd2cb
  • Pointer size: 132 Bytes
  • Size of remote file: 1.7 MB
data/RGB/Focus.png ADDED

Git LFS Details

  • SHA256: cbf2fd02f277a2a8083ab4470f12ff487906bbb1840f90f75d65157c971f3ca1
  • Pointer size: 132 Bytes
  • Size of remote file: 1.69 MB
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradio==4.31.5
2
+ opencv-python==4.10.0.82
3
+ openvino==2023.3.0
4
+ torch==2.2.2
5
+ cryptography==42.0.8