AaronCIH committed on
Commit
48b514b
·
verified ·
1 Parent(s): 04a2b8d

Update scripts/1_generate_iqa_brief.py

Browse files
Files changed (1) hide show
  1. scripts/1_generate_iqa_brief.py +49 -12
scripts/1_generate_iqa_brief.py CHANGED
@@ -10,7 +10,7 @@ import re
10
  import json
11
 
12
  # ir dataset dictionary:
13
- base_rt = f'/home/CORP/hsiang.chen/Project/Datasets/IR'
14
  dataset_dict = {
15
  # Image Restoration
16
  ## Super Resolution (3)
@@ -158,6 +158,24 @@ dataset_dict = {
158
  "UDC": {'val': 'Other/UDC/metas/val.list', # (60, pair)
159
  'test': 'Other/UDC/metas/test.list'}, # (60, pair)
160
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
161
  }
162
 
163
  def IRImageData(listfile):
@@ -172,12 +190,20 @@ def IRImageData(listfile):
172
  # Dataset
173
  LQ_list = []
174
  HQ_list = []
 
175
  for data in paths:
176
  lq_pth, hq_pth, label = data
177
  if os.path.isfile(lq_pth):
178
  LQ_list.append(lq_pth)
 
 
 
179
  if os.path.isfile(hq_pth):
180
- HQ_list.append(hq_pth)
 
 
 
 
181
  return LQ_list, HQ_list
182
 
183
  # question dictionary:
@@ -313,10 +339,9 @@ def question_generate(ref="Full-Reference", degra="Common"):
313
  return template
314
 
315
  if __name__ == "__main__":
316
- for degradation, degra_dict in dataset_dict.items():
317
- for dname, ddict in degra_dict.items():
318
- for dset, list_path in ddict.items():
319
-
320
  meta_refA = []
321
  meta_A = []
322
  meta_syn = []
@@ -351,6 +376,18 @@ if __name__ == "__main__":
351
  else:
352
  image_A = None
353
 
 
 
 
 
 
 
 
 
 
 
 
 
354
  meta_refA.append({
355
  "distortion_class": degradation,
356
  "distortion_name": degradation,
@@ -363,11 +400,11 @@ if __name__ == "__main__":
363
  "conversations": [
364
  {
365
  "from": "human",
366
- "value": question_generate(ref="Full-Reference", degra="ONE"),
367
  },
368
  {
369
  "from": "gpt",
370
- "value": degradation
371
  }
372
  ],
373
  })
@@ -384,11 +421,11 @@ if __name__ == "__main__":
384
  "conversations": [
385
  {
386
  "from": "human",
387
- "value": question_generate(ref="Non-Reference", degra="ONE"),
388
  },
389
  {
390
  "from": "gpt",
391
- "value": degradation
392
  }
393
  ],
394
  })
@@ -405,11 +442,11 @@ if __name__ == "__main__":
405
  "conversations": [
406
  {
407
  "from": "human",
408
- "value": question_generate(ref="Non-Reference", degra="ONE"),
409
  },
410
  {
411
  "from": "gpt",
412
- "value": degradation
413
  }
414
  ],
415
  })
 
10
  import json
11
 
12
  # ir dataset dictionary:
13
+ base_rt = f'/home/work/shared-fi-datasets-01/users/hsiang.chen/Project/Datasets/IR'
14
  dataset_dict = {
15
  # Image Restoration
16
  ## Super Resolution (3)
 
158
  "UDC": {'val': 'Other/UDC/metas/val.list', # (60, pair)
159
  'test': 'Other/UDC/metas/test.list'}, # (60, pair)
160
  },
161
+
162
+ ## Composite (1)
163
+ "Composite": {
164
+ "CDD": {
165
+ 'train': 'Composite/CDD11/metas/train.list', # (13013, pair)
166
+ 'test_haze': 'Composite/CDD11/metas/test_haze.list', # (200, pair)
167
+ 'test_haze_rain': 'Composite/CDD11/metas/test_haze_rain.list', # (200, pair)
168
+ 'test_haze_snow': 'Composite/CDD11/metas/test_haze_snow.list', # (200, pair)
169
+ 'test_low': 'Composite/CDD11/metas/test_low.list', # (200, pair)
170
+ 'test_low_haze': 'Composite/CDD11/metas/test_low_haze.list', # (200, pair)
171
+ 'test_low_haze_rain': 'Composite/CDD11/metas/test_low_haze_rain.list', # (200, pair)
172
+ 'test_low_haze_snow': 'Composite/CDD11/metas/test_low_haze_snow.list', # (200, pair)
173
+ 'test_low_rain': 'Composite/CDD11/metas/test_low_rain.list', # (200, pair)
174
+ 'test_low_snow': 'Composite/CDD11/metas/test_low_snow.list', # (200, pair)
175
+ 'test_rain': 'Composite/CDD11/metas/test_rain.list', # (200, pair)
176
+ 'test_snow': 'Composite/CDD11/metas/test_snow.list', # (200, pair)
177
+ },
178
+ },
179
  }
180
 
181
  def IRImageData(listfile):
 
190
  # Dataset
191
  LQ_list = []
192
  HQ_list = []
193
+ labels = []
194
  for data in paths:
195
  lq_pth, hq_pth, label = data
196
  if os.path.isfile(lq_pth):
197
  LQ_list.append(lq_pth)
198
+ else:
199
+ LQ_list.append(None)
200
+
201
  if os.path.isfile(hq_pth):
202
+ HQ_list.append(hq_pth)
203
+ else:
204
+ HQ_list.append(None)
205
+
206
+
207
  return LQ_list, HQ_list
208
 
209
  # question dictionary:
 
339
  return template
340
 
341
  if __name__ == "__main__":
342
+ for degradation, degra_dict in dataset_dict.items(): # Low-light, [LOL, DICM ...]
343
+ for dname, ddict in degra_dict.items(): # LOL, [train, test, ...]
344
+ for dset, list_path in ddict.items(): # train, path.
 
345
  meta_refA = []
346
  meta_A = []
347
  meta_syn = []
 
376
  else:
377
  image_A = None
378
 
379
+ if degradation == "Composite":
380
+ annotation = ", ".join(label.split("_"))
381
+ else:
382
+ annotation = degradation
383
+
384
+ if degradation == "Composite":
385
+ ref_question = question_generate(ref="Full-Reference", degra="Common")
386
+ nref_question = question_generate(ref="Non-Reference", degra="Common")
387
+ else:
388
+ ref_question = question_generate(ref="Full-Reference", degra="ONE")
389
+ nref_question = question_generate(ref="Non-Reference", degra="ONE")
390
+
391
  meta_refA.append({
392
  "distortion_class": degradation,
393
  "distortion_name": degradation,
 
400
  "conversations": [
401
  {
402
  "from": "human",
403
+ "value": ref_question,
404
  },
405
  {
406
  "from": "gpt",
407
+ "value": annotation
408
  }
409
  ],
410
  })
 
421
  "conversations": [
422
  {
423
  "from": "human",
424
+ "value": nref_question,
425
  },
426
  {
427
  "from": "gpt",
428
+ "value": annotation
429
  }
430
  ],
431
  })
 
442
  "conversations": [
443
  {
444
  "from": "human",
445
+ "value": nref_question,
446
  },
447
  {
448
  "from": "gpt",
449
+ "value": annotation
450
  }
451
  ],
452
  })