dannyroxas committed on
Commit
c6268af
·
verified ·
1 Parent(s): a5d9072

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +273 -331
app.py CHANGED
@@ -1,19 +1,63 @@
1
- import gradio as gr
2
  import tensorflow as tf
3
  import numpy as np
4
  import cv2
5
- import pickle
6
  import os
7
- import tempfile
8
  from PIL import Image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
  class MultiAttributeClassifier:
11
  def __init__(self):
 
 
12
  self.models = {}
13
  self.encoders = {}
14
  self.gan_models = {}
15
- self.confidence_threshold = 0.6
16
- self.categories = ['content', 'style', 'time_of_day', 'weather']
 
 
 
 
 
 
17
  self.load_classification_models()
18
  self.load_gan_models()
19
 
@@ -48,7 +92,7 @@ class MultiAttributeClassifier:
48
  self.models[category] = tf.keras.models.load_model(
49
  model_path,
50
  compile=False,
51
- custom_objects=CUSTOM_OBJECTS
52
  )
53
  print(f" βœ… Loaded {category} with custom_objects")
54
  except Exception as e2:
@@ -165,7 +209,7 @@ class MultiAttributeClassifier:
165
  self.gan_models[model_name] = tf.keras.models.load_model(
166
  model_path,
167
  compile=False,
168
- custom_objects=CUSTOM_OBJECTS
169
  )
170
  print(f" βœ… Loaded with custom_objects (InstanceNormalization)")
171
  except Exception as e3:
@@ -205,169 +249,172 @@ class MultiAttributeClassifier:
205
 
206
  print(f"🎯 Successfully loaded {len(self.gan_models)} GAN models")
207
 
208
- def preprocess_image_for_classification(self, image_path):
209
- """Preprocess image for classification (224x224)"""
210
- img = cv2.imread(image_path)
211
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
212
- img = cv2.resize(img, (224, 224))
213
- img = img.astype(np.float32) / 255.0
214
- img = (img - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
215
- img = np.expand_dims(img, axis=0)
216
- return img
217
-
218
- def preprocess_image_for_gan(self, image_path, target_size=(256, 256)):
219
- """Preprocess image for GAN models (256x256, normalized to [-1,1])"""
220
- img = cv2.imread(image_path)
221
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
222
- img = cv2.resize(img, target_size)
223
- img = img.astype(np.float32)
224
- img = (img / 127.5) - 1.0 # Normalize to [-1, 1]
225
- img = np.expand_dims(img, axis=0)
226
- return img
227
-
228
- def postprocess_gan_output(self, generated_img):
229
- """Convert GAN output back to displayable image"""
230
- # Convert from [-1, 1] to [0, 255]
231
- img = (generated_img[0] + 1.0) * 127.5
232
- img = np.clip(img, 0, 255).astype(np.uint8)
233
- return Image.fromarray(img)
234
 
235
- def classify_image(self, image_path):
236
- """Classify image across all loaded attributes"""
237
- img = self.preprocess_image_for_classification(image_path)
238
- results = {}
239
 
240
  for category in self.categories:
241
  if category in self.models and category in self.encoders:
242
  try:
243
- # Get predictions
244
- predictions = self.models[category].predict(img, verbose=0)
245
- if len(predictions.shape) > 1:
246
- predictions = predictions[0]
247
 
248
- # Get class names
249
- class_names = list(self.encoders[category].keys())
250
- predicted_idx = np.argmax(predictions)
251
- confidence = float(predictions[predicted_idx])
252
- predicted_class = class_names[predicted_idx]
253
 
254
- # Get top predictions
255
- top_indices = np.argsort(predictions)[-3:][::-1]
256
- top_3 = [(class_names[i], float(predictions[i])) for i in top_indices]
257
 
258
- results[category] = {
259
- 'predicted_class': predicted_class,
260
- 'confidence': confidence,
261
- 'is_confident': confidence >= self.confidence_threshold,
262
- 'top_3': top_3
263
  }
264
-
265
  except Exception as e:
266
- results[category] = {
267
- 'predicted_class': 'error',
268
- 'confidence': 0.0,
269
- 'is_confident': False,
270
- 'error': str(e)
271
  }
 
 
 
 
 
 
 
 
 
272
 
273
- return results
274
 
275
- def apply_gan_transformation(self, image_path, transformation_type):
276
- """Apply GAN transformation to image"""
277
- if transformation_type not in self.gan_models:
278
- return None, f"GAN model '{transformation_type}' not available"
279
 
280
  try:
281
- # Preprocess image for GAN
282
- img = self.preprocess_image_for_gan(image_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
283
 
284
  # Apply transformation
285
- generated = self.gan_models[transformation_type].predict(img, verbose=0)
 
286
 
287
- # Postprocess result
288
- result_image = self.postprocess_gan_output(generated)
 
289
 
290
- return result_image, "Success"
291
 
292
  except Exception as e:
293
- return None, f"Error applying {transformation_type}: {str(e)}"
294
-
295
- def get_available_transfers(classification_results):
296
- """Get available style transfers based on classifications"""
297
- transfers = []
298
 
299
- for category, result in classification_results.items():
300
- if not result.get('is_confident', False):
301
- continue
302
-
303
- predicted = result['predicted_class']
304
- confidence = result['confidence']
305
 
306
- if category == 'time_of_day':
307
- if predicted == 'day':
308
- transfers.append({
309
- 'name': 'Day β†’ Night',
310
- 'gan_model': 'day_to_night',
311
- 'confidence': confidence,
312
- 'description': 'Transform daytime scene to nighttime'
 
313
  })
314
- elif predicted == 'night':
315
- transfers.append({
316
- 'name': 'Night β†’ Day',
317
- 'gan_model': 'night_to_day',
318
- 'confidence': confidence,
319
- 'description': 'Transform nighttime scene to daytime'
320
  })
321
 
322
- elif category == 'style':
323
- if predicted == 'photograph':
324
- transfers.append({
325
- 'name': 'Photo β†’ Japanese Art',
326
- 'gan_model': 'photo_to_japanese',
327
- 'confidence': confidence,
328
- 'description': 'Convert realistic photo to Japanese ukiyo-e art style'
 
329
  })
330
- elif predicted == 'japanese_art':
331
- transfers.append({
332
- 'name': 'Japanese Art β†’ Photo',
333
- 'gan_model': 'japanese_to_photo',
334
- 'confidence': confidence,
335
- 'description': 'Convert artistic style to realistic photo'
336
  })
337
 
338
- elif category == 'weather':
339
- if predicted == 'foggy':
340
- transfers.append({
341
- 'name': 'Foggy β†’ Clear',
342
- 'gan_model': 'foggy_to_clear',
343
- 'confidence': confidence,
344
- 'description': 'Remove fog and enhance visibility'
345
- })
346
- elif predicted == 'clear':
347
- transfers.append({
348
- 'name': 'Clear β†’ Foggy',
349
- 'gan_model': 'clear_to_foggy',
350
- 'confidence': confidence,
351
- 'description': 'Add atmospheric fog effect'
 
 
 
 
 
 
 
 
 
 
 
352
  })
353
-
354
- # Add season transfers (these work regardless of season classification)
355
- transfers.extend([
356
- {
357
- 'name': 'Add Winter Atmosphere',
358
- 'gan_model': 'summer_to_winter',
359
- 'confidence': 0.8,
360
- 'description': 'Transform scene to winter with snow and cold atmosphere'
361
- },
362
- {
363
- 'name': 'Add Summer Atmosphere',
364
- 'gan_model': 'winter_to_summer',
365
- 'confidence': 0.8,
366
- 'description': 'Transform scene to summer with warm, lush atmosphere'
367
- }
368
- ])
369
-
370
- return transfers
371
 
372
  # Initialize classifier globally
373
  print("πŸš€ Starting StyleTransfer App...")
@@ -382,233 +429,128 @@ if len(classifier.gan_models) > 0:
382
  print("="*50)
383
 
384
  def analyze_image(image):
385
- """Main analysis function"""
386
  if image is None:
387
- return "Please upload an image first!", gr.update(choices=[], visible=False), gr.update(visible=False)
388
-
389
- # Save uploaded image to temporary file
390
- with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
391
- image.save(tmp_file.name)
392
- temp_path = tmp_file.name
393
 
394
  try:
395
- # Get classifications
396
- results = classifier.classify_image(temp_path)
397
 
398
  # Format analysis results
399
- analysis_text = "## πŸ“Š Image Analysis Results\n\n"
400
-
401
- for category, result in results.items():
402
- if 'error' not in result:
403
- confidence = result['confidence']
404
- status = "βœ… CONFIDENT" if result['is_confident'] else "⚠️ UNCERTAIN"
405
- predicted = result['predicted_class']
406
-
407
- analysis_text += f"**{category.replace('_', ' ').title()}:** {predicted} ({confidence:.1%}) {status}\n\n"
408
-
409
- # Show top alternatives
410
- if len(result['top_3']) > 1:
411
- alt_text = ", ".join([f"{name} ({score:.1%})" for name, score in result['top_3'][1:]])
412
- analysis_text += f" *Alternatives: {alt_text}*\n\n"
413
- else:
414
- analysis_text += f"**{category.replace('_', ' ').title()}:** Error - {result.get('error', 'Unknown error')}\n\n"
415
 
416
- # Get available transfers
417
- transfers = get_available_transfers(results)
418
 
419
- if transfers:
 
 
420
  analysis_text += "## 🎨 Available Style Transfers\n\n"
421
- transfer_choices = []
422
-
423
- for transfer in transfers:
424
- analysis_text += f"**{transfer['name']}** ({transfer['confidence']:.1%})\n"
425
- analysis_text += f"*{transfer['description']}*\n\n"
426
- transfer_choices.append(transfer['name'])
427
  else:
428
- analysis_text += "## ⚠️ No Style Transfers Available\n\n"
429
- analysis_text += "Could not confidently classify the image for available transformations.\n\n"
430
- transfer_choices = []
431
 
432
- return (
433
- analysis_text,
434
- gr.update(choices=transfer_choices, visible=len(transfer_choices) > 0),
435
- gr.update(visible=len(transfer_choices) > 0)
436
- )
437
-
438
  except Exception as e:
439
- return f"Error analyzing image: {str(e)}", gr.update(choices=[], visible=False), gr.update(visible=False)
440
-
441
- finally:
442
- # Clean up temp file
443
- if os.path.exists(temp_path):
444
- os.unlink(temp_path)
445
 
446
- def apply_style_transfer(original_image, selected_transfers):
447
- """Apply selected style transfers using actual GAN models"""
448
- if not selected_transfers:
449
- return original_image, "⚠️ No transfers selected!"
450
-
451
- if original_image is None:
452
- return None, "⚠️ No image provided!"
453
 
454
- # Save original image to temp file
455
- with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
456
- original_image.save(tmp_file.name)
457
- temp_path = tmp_file.name
458
 
459
- result_text = "## 🎨 Applied Transformations\n\n"
460
- current_image = original_image
461
 
462
- try:
463
- # Map transfer names to GAN models
464
- transfer_mapping = {
465
- 'Day β†’ Night': 'day_to_night',
466
- 'Night β†’ Day': 'night_to_day',
467
- 'Photo β†’ Japanese Art': 'photo_to_japanese',
468
- 'Japanese Art β†’ Photo': 'japanese_to_photo',
469
- 'Foggy β†’ Clear': 'foggy_to_clear',
470
- 'Clear β†’ Foggy': 'clear_to_foggy',
471
- 'Add Winter Atmosphere': 'summer_to_winter',
472
- 'Add Summer Atmosphere': 'winter_to_summer'
473
- }
474
-
475
- # Apply each selected transformation
476
- for transfer_name in selected_transfers:
477
- if transfer_name in transfer_mapping:
478
- gan_model = transfer_mapping[transfer_name]
479
-
480
- # Apply transformation
481
- transformed_image, status = classifier.apply_gan_transformation(temp_path, gan_model)
482
-
483
- if transformed_image is not None:
484
- result_text += f"βœ… **{transfer_name}** - {status}\n"
485
- current_image = transformed_image
486
-
487
- # Save transformed image for next transformation
488
- with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as new_tmp:
489
- transformed_image.save(new_tmp.name)
490
- if os.path.exists(temp_path):
491
- os.unlink(temp_path)
492
- temp_path = new_tmp.name
493
- else:
494
- result_text += f"❌ **{transfer_name}** - {status}\n"
495
  else:
496
- result_text += f"⚠️ **{transfer_name}** - Transfer not implemented\n"
497
-
498
- result_text += f"\nπŸŽ‰ **Transformation{'s' if len(selected_transfers) > 1 else ''} complete!**\n\n"
499
-
500
- if len(selected_transfers) > 1:
501
- result_text += "*Multiple transformations were applied in sequence for a combined effect.*"
502
-
503
- return current_image, result_text
504
-
505
- except Exception as e:
506
- return original_image, f"❌ Error during transformation: {str(e)}"
507
 
508
- finally:
509
- # Clean up temp file
510
- if os.path.exists(temp_path):
511
- os.unlink(temp_path)
512
 
513
- # Create Gradio interface
514
- demo = gr.Blocks(title="🎨 Intelligent Style Transfer System")
 
 
 
 
 
515
 
516
- with demo:
517
- gr.Markdown("""
518
- # 🎨 Intelligent Multi-Attribute Style Transfer
519
-
520
- Upload an image and our AI will analyze multiple attributes (content, style, time, weather)
521
- and suggest relevant style transfers using trained GAN models!
522
 
523
- **Available Transformations:**
524
- - πŸŒ… Day ↔ Night conversion (CycleGAN)
525
- - 🎨 Photo ↔ Japanese Ukiyo-e art style (CycleGAN)
526
- - 🌫️ Foggy ↔ Clear weather transformation (CycleGAN)
527
- - 🌿 Summer ↔ Winter seasonal atmosphere (CycleGAN)
528
- """)
529
 
530
  with gr.Row():
531
  with gr.Column(scale=1):
532
- input_image = gr.Image(
533
- type="pil",
534
- label="πŸ“€ Upload Your Image"
535
- )
536
 
537
- analyze_btn = gr.Button(
538
- "πŸ” Analyze Image",
539
- variant="primary"
540
- )
541
-
542
  with gr.Column(scale=1):
543
- analysis_output = gr.Markdown(
544
- value="Upload an image and click 'Analyze Image' to see what our AI detects!"
 
 
 
545
  )
546
 
547
  with gr.Row():
548
- transfer_selector = gr.CheckboxGroup(
549
- label="🎨 Select Style Transfers to Apply",
550
- choices=[],
551
- visible=False
552
- )
553
-
554
- with gr.Row():
555
- apply_btn = gr.Button(
556
- "✨ Apply Selected Transfers",
557
- variant="secondary",
558
- visible=False
559
- )
560
-
561
  with gr.Row():
562
- with gr.Column(scale=1):
563
- output_image = gr.Image(
564
- label="πŸŽ‰ Transformed Image"
565
- )
566
 
567
- with gr.Column(scale=1):
568
- result_output = gr.Markdown(
569
- value="Select transfers and click 'Apply' to see the magic happen!"
570
- )
 
 
 
 
 
571
 
572
- # Connect the interface
573
  analyze_btn.click(
574
  fn=analyze_image,
575
- inputs=[input_image],
576
- outputs=[analysis_output, transfer_selector, apply_btn]
577
  )
578
 
579
  apply_btn.click(
580
- fn=apply_style_transfer,
581
- inputs=[input_image, transfer_selector],
582
- outputs=[output_image, result_output]
583
  )
584
-
585
- gr.Markdown("""
586
- ---
587
- ### πŸ”§ Technical Details
588
-
589
- This system uses multiple trained models:
590
-
591
- **Classification Models:**
592
- - **Content classifier**: Human vs Landscape (97% accuracy)
593
- - **Style classifier**: Photograph vs Japanese Art (92% accuracy)
594
- - **Time classifier**: Day vs Night (90% accuracy)
595
- - **Weather classifier**: Foggy vs Clear (85% accuracy)
596
-
597
- **GAN Models:**
598
- - **Day/Night**: CycleGAN for time-of-day transformation
599
- - **Style Transfer**: CycleGAN for photo ↔ Japanese art conversion
600
- - **Weather**: CycleGAN for fog removal/addition
601
- - **Seasons**: CycleGAN for summer ↔ winter atmosphere
602
-
603
- Only confident predictions (>60%) trigger relevant style transfer suggestions.
604
- Multiple transformations can be combined for creative effects!
605
- """)
606
 
607
- # Launch configuration for Hugging Face Spaces
608
  if __name__ == "__main__":
609
- demo.launch(
610
- server_name="0.0.0.0",
611
- server_port=7860,
612
- share=True,
613
- show_error=True
614
- )
 
 
1
  import tensorflow as tf
2
  import numpy as np
3
  import cv2
 
4
  import os
5
+ import pickle
6
  from PIL import Image
7
+ import gradio as gr
8
+
9
# CRITICAL: custom InstanceNormalization layer — must match the layer used at
# training time so the saved CycleGAN models can be deserialized via
# custom_objects.
class InstanceNormalization(tf.keras.layers.Layer):
    """Per-sample, per-channel normalization over the spatial axes (H, W).

    Unlike BatchNormalization this normalizes each example independently,
    the standard choice for CycleGAN-style generators.
    """

    def __init__(self, epsilon=1e-5, **kwargs):
        super(InstanceNormalization, self).__init__(**kwargs)
        # Small constant added to the variance for numerical stability.
        self.epsilon = epsilon

    def build(self, input_shape):
        channels = input_shape[-1]
        # Learnable affine parameters. Weight names must stay 'scale' and
        # 'offset' so checkpoints saved with the original layer still load.
        self.scale = self.add_weight(
            name='scale',
            shape=[channels],
            initializer=tf.random_normal_initializer(1., 0.02),
            trainable=True,
        )
        self.offset = self.add_weight(
            name='offset',
            shape=[channels],
            initializer='zeros',
            trainable=True,
        )
        super().build(input_shape)

    def call(self, x):
        # Moments over spatial axes only: one mean/variance per (sample, channel).
        mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
        normalized = (x - mean) * tf.math.rsqrt(variance + self.epsilon)
        return self.scale * normalized + self.offset

    def get_config(self):
        # Persist epsilon so the layer round-trips through (de)serialization.
        config = super().get_config()
        config.update({'epsilon': self.epsilon})
        return config
41
+
42
# Set up TensorFlow for compatibility
# NOTE(review): TF_CPP_MIN_LOG_LEVEL is read when tensorflow is imported;
# setting it here, after the import above, likely has no effect on the C++
# log verbosity — consider moving it before `import tensorflow`.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Force float32 end-to-end so the saved models load/run with consistent dtypes.
tf.keras.mixed_precision.set_global_policy('float32')
45
 
46
  class MultiAttributeClassifier:
47
def __init__(self):
    """Set up state, register custom layers, then load every model."""
    # Attribute heads this app classifies.
    self.categories = ['content', 'style', 'time_of_day', 'weather']

    # Populated by the loader methods below.
    self.models = {}       # category -> classifier model
    self.encoders = {}     # category -> label encoder
    self.gan_models = {}   # transformation name -> CycleGAN generator

    # Must be defined BEFORE any load_model call that needs custom
    # deserialization (InstanceNormalization lives outside Keras).
    self.custom_objects = {
        'InstanceNormalization': InstanceNormalization,
        'tf': tf,
    }

    self.load_classification_models()
    self.load_gan_models()
63
 
 
92
  self.models[category] = tf.keras.models.load_model(
93
  model_path,
94
  compile=False,
95
+ custom_objects=self.custom_objects
96
  )
97
  print(f" βœ… Loaded {category} with custom_objects")
98
  except Exception as e2:
 
209
  self.gan_models[model_name] = tf.keras.models.load_model(
210
  model_path,
211
  compile=False,
212
+ custom_objects=self.custom_objects
213
  )
214
  print(f" βœ… Loaded with custom_objects (InstanceNormalization)")
215
  except Exception as e3:
 
249
 
250
  print(f"🎯 Successfully loaded {len(self.gan_models)} GAN models")
251
 
252
def preprocess_image(self, image):
    """Normalize an input image to a (1, 224, 224, 3) float32 batch in [0, 1].

    Accepts a file path, a numpy array, or a PIL image.
    """
    # Coerce the supported input kinds to a PIL image.
    if isinstance(image, str):
        image = Image.open(image)
    elif isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    # Classifiers were trained on 224x224 inputs.
    image = image.resize((224, 224))

    arr = np.array(image)
    # Drop the alpha channel from RGBA inputs.
    # NOTE(review): grayscale ('L') inputs would stay single-channel here —
    # confirm upstream always supplies RGB(A) images.
    if arr.shape[-1] == 4:
        arr = arr[:, :, :3]

    arr = arr.astype(np.float32) / 255.0  # scale to [0, 1]
    return np.expand_dims(arr, axis=0)    # add batch dimension
 
 
 
 
274
 
275
def predict_attributes(self, image):
    """Predict every configured attribute for *image*.

    Returns a dict mapping category -> {'class': str, 'confidence': float}.
    Categories whose model/encoder is missing get a canned fallback guess;
    a per-category prediction error degrades to ('unknown', 0.0) so the
    remaining categories are still analyzed.
    """
    preprocessed = self.preprocess_image(image)
    predictions = {}

    # Fix: this table was rebuilt on every loop iteration; build it once.
    # Canned guesses used when a category's model failed to load.
    fallback_predictions = {
        'content': {'class': 'outdoor', 'confidence': 0.6},
        'style': {'class': 'realistic', 'confidence': 0.7},
        'time_of_day': {'class': 'day', 'confidence': 0.8},
        'weather': {'class': 'clear', 'confidence': 0.8},
    }

    for category in self.categories:
        if category in self.models and category in self.encoders:
            try:
                # Model prediction for a single-image batch.
                pred = self.models[category].predict(preprocessed, verbose=0)

                predicted_class_idx = np.argmax(pred, axis=1)[0]
                confidence = float(np.max(pred))

                # Map the index back to a label via the fitted encoder.
                class_name = self.encoders[category].classes_[predicted_class_idx]

                predictions[category] = {
                    'class': class_name,
                    'confidence': confidence,
                }
            except Exception as e:
                # Degrade gracefully: report the failure, keep going.
                print(f"Error predicting {category}: {e}")
                predictions[category] = {
                    'class': 'unknown',
                    'confidence': 0.0,
                }
        else:
            predictions[category] = fallback_predictions.get(
                category, {'class': 'unknown', 'confidence': 0.0})

    return predictions
314
 
315
def apply_style_transfer(self, image, transformation):
    """Run one CycleGAN generator on *image*.

    Returns (uint8 H x W x 3 array, status message) on success, or
    (None, error message) when the model is missing or inference fails.
    """
    if transformation not in self.gan_models:
        return None, f"GAN model '{transformation}' not available"

    try:
        # Coerce path / array inputs to a PIL image.
        if isinstance(image, str):
            img = Image.open(image)
        elif isinstance(image, np.ndarray):
            img = Image.fromarray(image)
        else:
            img = image

        # The GAN generators expect 256x256 inputs scaled to [-1, 1].
        arr = np.array(img.resize((256, 256)))
        if arr.shape[-1] == 4:  # strip alpha from RGBA
            arr = arr[:, :, :3]
        arr = (arr.astype(np.float32) / 127.5) - 1.0
        batch = np.expand_dims(arr, axis=0)

        # Apply the transformation.
        model = self.gan_models[transformation]
        generated = model.predict(batch, verbose=0)

        # Map the output back from [-1, 1] to displayable uint8 pixels.
        out = (generated[0] + 1.0) * 127.5
        out = np.clip(out, 0, 255).astype(np.uint8)

        return out, "Transformation completed!"

    except Exception as e:
        print(f"Error in style transfer: {e}")
        return None, f"Error: {str(e)}"
 
 
 
353
 
354
def get_style_recommendations(self, predictions):
    """Suggest GAN transformations based on attribute predictions.

    *predictions* maps category -> {'class': str, 'confidence': float}.
    Returns a list of dicts with 'transformation', 'confidence' and
    'description' keys, in fixed category order: time, weather, content,
    style. Fix applied: removed f-string prefixes from literals that
    contain no placeholders (lint F541).
    """
    recommendations = []

    # Time-based recommendations (only for confident day/night calls).
    if 'time_of_day' in predictions:
        time_pred = predictions['time_of_day']
        if time_pred['class'] == 'day' and time_pred['confidence'] > 0.7:
            recommendations.append({
                'transformation': 'day_to_night',
                'confidence': time_pred['confidence'],
                'description': f"Transform scene to night with {time_pred['confidence']*100:.0f}% confidence"
            })
        elif time_pred['class'] == 'night' and time_pred['confidence'] > 0.7:
            recommendations.append({
                'transformation': 'night_to_day',
                'confidence': time_pred['confidence'],
                'description': f"Transform scene to day with {time_pred['confidence']*100:.0f}% confidence"
            })

    # Weather-based recommendations.
    if 'weather' in predictions:
        weather_pred = predictions['weather']
        if weather_pred['class'] == 'clear' and weather_pred['confidence'] > 0.6:
            recommendations.append({
                'transformation': 'clear_to_foggy',
                'confidence': weather_pred['confidence'],
                'description': f"Add fog atmosphere with {weather_pred['confidence']*100:.0f}% confidence"
            })
        elif weather_pred['class'] == 'foggy' and weather_pred['confidence'] > 0.6:
            recommendations.append({
                'transformation': 'foggy_to_clear',
                'confidence': weather_pred['confidence'],
                'description': f"Clear fog from scene with {weather_pred['confidence']*100:.0f}% confidence"
            })

    # Content-based: outdoor scenes get both seasonal transfers, with a
    # fixed 0.8 confidence (they apply to any outdoor scene).
    if 'content' in predictions:
        content_pred = predictions['content']
        if content_pred['class'] in ('outdoor', 'landscape') and content_pred['confidence'] > 0.6:
            recommendations.extend([
                {
                    'transformation': 'summer_to_winter',
                    'confidence': 0.8,
                    'description': "Transform scene to winter with snow and cold atmosphere"
                },
                {
                    'transformation': 'winter_to_summer',
                    'confidence': 0.8,
                    'description': "Transform scene to summer with warm, lush atmosphere"
                }
            ])

    # Style-based: realistic photos can be converted to ukiyo-e art.
    if 'style' in predictions:
        style_pred = predictions['style']
        if style_pred['class'] == 'realistic' and style_pred['confidence'] > 0.6:
            recommendations.append({
                'transformation': 'photo_to_japanese',
                'confidence': style_pred['confidence'],
                'description': "Transform to Japanese ukiyo-e art style"
            })

    return recommendations
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
418
 
419
  # Initialize classifier globally
420
  print("πŸš€ Starting StyleTransfer App...")
 
429
  print("="*50)
430
 
431
def analyze_image(image):
    """Analyze an uploaded image and propose style transfers.

    Returns a 3-tuple matching the Gradio outputs:
    (markdown analysis text, CheckboxGroup update, gallery image list).
    """
    if image is None:
        # Fix: the CheckboxGroup output must receive a component update,
        # not a bare string value.
        return ("Please upload an image first.",
                gr.update(choices=[], visible=False),
                [])

    try:
        # Predictions for all attributes.
        predictions = classifier.predict_attributes(image)

        # Per-attribute summary.
        analysis_text = "## 🔍 Image Analysis Results\n\n"
        for category, pred in predictions.items():
            confidence_pct = pred['confidence'] * 100
            analysis_text += f"**{category.replace('_', ' ').title()}:** {pred['class']} ({confidence_pct:.1f}% confidence)\n\n"

        # Recommendations derived from the predictions.
        recommendations = classifier.get_style_recommendations(predictions)

        # Format recommendations for display.
        rec_choices = []
        if recommendations:
            analysis_text += "## 🎨 Available Style Transfers\n\n"
            for rec in recommendations:
                analysis_text += f"**{rec['transformation'].replace('_', ' → ').title()}** ({rec['confidence']*100:.0f}%) {rec['description']}\n\n"
                rec_choices.append(rec['transformation'])
        else:
            analysis_text += "No specific style transfer recommendations based on this image.\n"

        # Fix: only show the selector when there is something to select
        # (previously an empty CheckboxGroup was made visible).
        return (analysis_text,
                gr.update(choices=rec_choices, value=None, visible=bool(rec_choices)),
                [])

    except Exception as e:
        print(f"Error in analysis: {e}")
        import traceback
        traceback.print_exc()
        return f"Error analyzing image: {str(e)}", gr.update(visible=False), []
 
 
466
 
467
def apply_transformations(image, selected_transformations):
    """Apply each selected GAN transformation and collect the results.

    Returns (status text, list of transformed images for the gallery).
    Failures of one transformation do not stop the others.
    """
    if image is None:
        return "Please upload an image first.", []
    if not selected_transformations:
        return "Please select at least one transformation.", []

    results = []
    status_messages = []

    for transformation in selected_transformations:
        try:
            transformed_img, message = classifier.apply_style_transfer(image, transformation)
            label = transformation.replace('_', ' → ').title()
            if transformed_img is not None:
                results.append(transformed_img)
                status_messages.append(f"✅ {label}: {message}")
            else:
                status_messages.append(f"❌ {label}: {message}")
        except Exception as e:
            # Raw name here (not the prettified label), matching the contract
            # that unexpected errors report the transformation identifier.
            status_messages.append(f"❌ {transformation}: Error - {str(e)}")

    return "\n".join(status_messages), results
 
 
491
 
492
# Every transformation a user may select manually, independent of the
# classifier's recommendations. Names match keys in classifier.gan_models.
available_transformations = [
    "day_to_night",
    "night_to_day",
    "clear_to_foggy",
    "foggy_to_clear",
    "photo_to_japanese",
    "japanese_to_photo",
    "summer_to_winter",
    "winter_to_summer",
]
499
 
500
# ---------------------------------------------------------------------------
# Gradio interface
# ---------------------------------------------------------------------------
with gr.Blocks(title="Intelligent Multi-Attribute Style Transfer", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🎨 Intelligent Multi-Attribute Style Transfer")
    gr.Markdown("Upload an image and our AI will analyze multiple attributes (content, style, time, weather) and suggest relevant style transfers using trained GAN models!")

    # Static list of what the app can do.
    gr.Markdown("## Available Transformations:")
    gr.Markdown("• 🌅 Day ↔ Night conversion (CycleGAN)")
    gr.Markdown("• 🎨 Photo ↔ Japanese ukiyo-e art style (CycleGAN)")
    gr.Markdown("• 🌫️ Foggy ↔ Clear weather transformation (CycleGAN)")
    gr.Markdown("• 🌿 Summer ↔ Winter seasonal atmosphere (CycleGAN)")

    # Upload + analysis side by side.
    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(label="📤 Upload Your Image", type="pil")
            analyze_btn = gr.Button("🔍 Analyze Image", variant="primary")
        with gr.Column(scale=1):
            analysis_output = gr.Markdown("## 📊 Image Analysis Results", label="Analysis Results")
            # Populated (and shown) by analyze_image.
            recommendations = gr.CheckboxGroup(
                choices=[],
                label="🎨 Available Style Transfers",
                visible=False,
            )

    with gr.Row():
        with gr.Column():
            apply_btn = gr.Button("🎯 Apply Selected Transfers", variant="secondary")

    with gr.Row():
        status_output = gr.Textbox(label="📋 Applied Transformations", interactive=False)

    with gr.Row():
        results_gallery = gr.Gallery(
            label="🖼️ Transformed Images",
            show_label=True,
            elem_id="gallery",
            columns=2,
            rows=2,
            height="auto",
        )

    # Wire the buttons to their handlers.
    analyze_btn.click(
        fn=analyze_image,
        inputs=[image_input],
        outputs=[analysis_output, recommendations, results_gallery],
    )
    apply_btn.click(
        fn=apply_transformations,
        inputs=[image_input, recommendations],
        outputs=[status_output, results_gallery],
    )

if __name__ == "__main__":
    # Bind to all interfaces on the standard Hugging Face Spaces port.
    demo.launch(server_name="0.0.0.0", server_port=7860)