Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1655,48 +1655,48 @@ class StyleTransferSystem:
|
|
| 1655 |
return model
|
| 1656 |
|
| 1657 |
def apply_adain_style(self, content_image, style_image, model, alpha=1.0):
|
| 1658 |
-
|
| 1659 |
-
|
| 1660 |
-
|
| 1661 |
-
|
| 1662 |
-
try:
|
| 1663 |
-
model = model.to(self.device)
|
| 1664 |
-
model.eval()
|
| 1665 |
-
|
| 1666 |
-
original_size = content_image.size
|
| 1667 |
-
|
| 1668 |
-
# This maintains the full image but may distort aspect ratio
|
| 1669 |
-
transform = transforms.Compose([
|
| 1670 |
-
transforms.Resize((256, 256)), # Resize to exact dimensions
|
| 1671 |
-
transforms.ToTensor(),
|
| 1672 |
-
transforms.Normalize(mean=[0.485, 0.456, 0.406],
|
| 1673 |
-
std=[0.229, 0.224, 0.225])
|
| 1674 |
-
])
|
| 1675 |
-
|
| 1676 |
-
|
| 1677 |
-
content_tensor = transform(content_image).unsqueeze(0).to(self.device)
|
| 1678 |
-
style_tensor = transform(style_image).unsqueeze(0).to(self.device)
|
| 1679 |
|
| 1680 |
-
|
| 1681 |
-
|
|
|
|
| 1682 |
|
| 1683 |
-
|
| 1684 |
-
output = output.squeeze(0).cpu()
|
| 1685 |
-
output = output * torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
|
| 1686 |
-
output = output + torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
|
| 1687 |
-
output = torch.clamp(output, 0, 1)
|
| 1688 |
|
| 1689 |
-
#
|
| 1690 |
-
|
| 1691 |
-
|
| 1692 |
-
|
| 1693 |
-
|
| 1694 |
-
|
| 1695 |
-
|
| 1696 |
-
|
| 1697 |
-
|
| 1698 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1699 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1700 |
# ===========================
|
| 1701 |
# HELPER FUNCTIONS
|
| 1702 |
# ===========================
|
|
|
|
| 1655 |
return model
|
| 1656 |
|
| 1657 |
def apply_adain_style(self, content_image, style_image, model, alpha=1.0):
    """Apply AdaIN-based style transfer.

    Runs the given AdaIN model on a content/style image pair and returns
    the stylized result as a PIL image at the content image's original
    resolution. Returns None when any input is missing or when inference
    fails for any reason (the error is printed, not raised).
    """
    # Guard clause: all three inputs are required.
    if content_image is None or style_image is None or model is None:
        return None

    try:
        model = model.to(self.device)
        model.eval()

        # Remember the caller's resolution so the output can be restored to it.
        target_size = content_image.size

        # ImageNet normalization constants expected by the encoder.
        imagenet_mean = [0.485, 0.456, 0.406]
        imagenet_std = [0.229, 0.224, 0.225]

        # NOTE: the fixed 256x256 resize keeps the full image but may
        # distort its aspect ratio.
        preprocess = transforms.Compose([
            transforms.Resize((256, 256)),  # Resize to exact dimensions
            transforms.ToTensor(),
            transforms.Normalize(mean=imagenet_mean, std=imagenet_std),
        ])

        content_batch = preprocess(content_image).unsqueeze(0).to(self.device)
        style_batch = preprocess(style_image).unsqueeze(0).to(self.device)

        # Inference only — no gradients needed.
        with torch.no_grad():
            stylized = model(content_batch, style_batch, alpha=alpha)

        # Undo the ImageNet normalization (x * std + mean), clamp to [0, 1].
        stylized = stylized.squeeze(0).cpu()
        stylized = stylized * torch.tensor(imagenet_std).view(3, 1, 1)
        stylized = stylized + torch.tensor(imagenet_mean).view(3, 1, 1)
        stylized = torch.clamp(stylized, 0, 1)

        # Convert back to PIL and restore the original resolution.
        result = transforms.ToPILImage()(stylized)
        result = result.resize(target_size, Image.LANCZOS)

        return result

    except Exception as e:
        print(f"Error applying AdaIN style: {e}")
        traceback.print_exc()
        return None
|
| 1699 |
+
|
| 1700 |
# ===========================
|
| 1701 |
# HELPER FUNCTIONS
|
| 1702 |
# ===========================
|