Spaces:
Sleeping
Sleeping
File size: 4,702 Bytes
da69390 0942f45 05c9258 da69390 105bb91 e72f092 ba17129 0942f45 da69390 4a2a785 da69390 05c9258 0942f45 da69390 a124ac7 da69390 05c9258 ba17129 60994cc ba17129 0942f45 60994cc da69390 ba17129 0942f45 ba17129 0942f45 da69390 ba17129 da69390 60994cc da69390 2e7c86b 60994cc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 |
import os
import streamlit as st
import tensorflow as tf
import numpy as np
from huggingface_hub import HfApi, hf_hub_download
from PIL import Image
from io import BytesIO
import requests
# --- Hugging Face model download -------------------------------------------
# Public repo hosting the exported CycleGAN model files.
api = HfApi()
username = "Hammad712"
repo_name = "CycleGAN-Model"
repo_id = f"{username}/{repo_name}"

# Mirror every file of the model repo into a local directory so that
# tf.keras.models.load_model can read the model from disk.
local_dir = "CycleGAN"  # relative path inside the working directory
os.makedirs(local_dir, exist_ok=True)
repo_files = api.list_repo_files(repo_id=repo_id, repo_type="model")
for filename in repo_files:
    hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)
# Load the model
# NOTE(review): mapping 'InstanceNormalization' onto the plain tf.keras.layers.Layer
# base class only makes deserialization succeed; a bare Layer acts as an identity op,
# so if the saved generator actually depends on instance normalization the outputs
# will likely be wrong. Confirm whether the real class (e.g.
# tensorflow_addons.layers.InstanceNormalization) should be registered here instead.
custom_objects = {'InstanceNormalization': tf.keras.layers.Layer} # Adjust custom objects as needed
loaded_model = tf.keras.models.load_model(local_dir, custom_objects=custom_objects)
# Helper functions
def load_and_preprocess_image(image):
    """Resize *image* to 256x256 and scale pixels to [-1, 1] for the generator.

    Parameters
    ----------
    image : PIL.Image.Image (or any object with compatible convert/resize)
        Input image in any mode; grayscale/RGBA inputs are converted to RGB
        so the output always has exactly 3 channels.

    Returns
    -------
    numpy.ndarray of shape (1, 256, 256, 3), float, in [-1, 1].
    """
    # Normalize the mode first: URL-loaded images may be grayscale or RGBA,
    # which would otherwise produce an array without a 3-channel last axis.
    img = image.convert("RGB")
    img = img.resize((256, 256))  # generator expects 256x256 inputs
    img = np.array(img)
    img = (img - 127.5) / 127.5  # map [0, 255] -> [-1, 1]
    img = np.expand_dims(img, axis=0)  # add batch dimension
    return img
def infer_image(model, image):
    """Run the CycleGAN generator on *image* and return a displayable array.

    Parameters
    ----------
    model : loaded Keras generator model
    image : PIL.Image.Image

    Returns
    -------
    numpy.ndarray of shape (256, 256, 3), dtype uint8, values in [0, 255].
    """
    batch = load_and_preprocess_image(image)
    # training=False keeps inference-mode behavior (no dropout / stat updates).
    output = model(batch, training=False)
    # Drop the batch axis and undo the [-1, 1] normalization.
    output = tf.squeeze(output, axis=0)
    return (output * 127.5 + 127.5).numpy().astype(np.uint8)
def load_image_from_url(url):
    """Download *url* and return it decoded as a PIL image.

    Parameters
    ----------
    url : str
        HTTP(S) URL pointing at an image resource.

    Returns
    -------
    PIL.Image.Image

    Raises
    ------
    requests.RequestException
        On network failure, timeout, or HTTP error status (callers catch
        broadly and surface the message via st.error).
    """
    # Timeout prevents a dead URL from hanging the Streamlit app forever.
    response = requests.get(url, timeout=10)
    # Fail fast on 4xx/5xx instead of handing an HTML error page to PIL.
    response.raise_for_status()
    img = Image.open(BytesIO(response.content))
    return img
# Custom CSS
# Dark theme plus orange gradient accents; the whole blob is injected into the
# page once via st.markdown(f"<style>...</style>") below, so it is kept as a
# single string. Do not edit class names without updating the matching HTML.
combined_css = """
.main, .sidebar .sidebar-content { background-color: #1c1c1c; color: #f0f2f6; }
.block-container { padding: 1rem 2rem; background-color: #333; border-radius: 10px; box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.5); }
.stButton>button, .stDownloadButton>button { background: linear-gradient(135deg, #ff7e5f, #feb47b); color: white; border: none; padding: 10px 24px; text-align: center; text-decoration: none; display: inline-block; font-size: 16px; margin: 4px 2px; cursor: pointer; border-radius: 5px; }
.stSpinner { color: #4CAF50; }
.title {
font-size: 3rem;
font-weight: bold;
display: flex;
align-items: center;
justify-content: center;
}
.colorful-text {
background: -webkit-linear-gradient(135deg, #ff7e5f, #feb47b);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
}
.black-white-text {
color: black;
}
.small-input .stTextInput>div>input {
height: 2rem;
font-size: 0.9rem;
}
.small-file-uploader .stFileUploader>div>div {
height: 2rem;
font-size: 0.9rem;
}
.custom-text {
font-size: 1.2rem;
color: #feb47b;
text-align: center;
margin-top: -20px;
margin-bottom: 20px;
}
"""
# Streamlit application
# Page chrome: wide layout, injected CSS theme, and the gradient title/tagline.
st.set_page_config(layout="wide")
st.markdown(f"<style>{combined_css}</style>", unsafe_allow_html=True)
st.markdown('<div class="title"><span class="colorful-text">Photo</span> <span class="black-white-text">to Van Gogh</span></div>', unsafe_allow_html=True)
st.markdown('<div class="custom-text">Convert photos to Van Gogh style using AI</div>', unsafe_allow_html=True)
# Streamlit UI
# Two input paths: a local file upload (JPEG only) or a remote image URL.
# The upload takes precedence when both are provided.
uploaded_file = st.file_uploader("Choose an image...", type="jpg")
image_url = st.text_input("Or enter an image URL:")
image = None
if uploaded_file is not None:
    image = Image.open(uploaded_file)
elif image_url:
    try:
        image = load_image_from_url(image_url)
    except Exception as e:
        # Broad catch is deliberate: any fetch/decode failure is shown in the UI.
        st.error(f"Failed to load image from URL: {e}")
if image is not None:
    if st.button("Run Inference"):
        # Perform inference
        with st.spinner('Processing...'):
            generated_image = infer_image(loaded_model, image)
        # Display the original and generated images side by side
        st.markdown("### Result")
        col1, col2 = st.columns(2)
        with col1:
            st.image(image, caption='Original Image', use_column_width=True)
        with col2:
            st.image(generated_image, caption='Generated Image', use_column_width=True)
        # Provide a download button for the generated image
        # (serialize the uint8 array back to JPEG bytes for st.download_button)
        img_byte_arr = BytesIO()
        Image.fromarray(generated_image).save(img_byte_arr, format='JPEG')
        img_byte_arr = img_byte_arr.getvalue()
        st.download_button(
            label="Download Generated Image",
            data=img_byte_arr,
            file_name="generated_image.jpg",
            mime="image/jpeg"
        )
        st.success("Image processed successfully!")
|