File size: 6,482 Bytes
a5f1771 b3cd8a1 5476c54 3ed3a0e 5476c54 fef08a3 5476c54 0310844 5476c54 fef08a3 5476c54 a5f1771 5476c54 ff86e54 83c5a9d f81e289 83c5a9d 2f08fbd 94aa15a 4361b30 dae23aa d6268fc fef08a3 5476c54 0f38a7a 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 a5f1771 5476c54 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 |
import streamlit as st
from PIL import Image
import requests
from io import BytesIO
# import tensorflow as tf
# import tensorflow as tf
import streamlit as st
from diffusers import StableDiffusionPipeline
from diffusers import DiffusionPipeline
import torch
from accelerate import Accelerator
# (Commented-out earlier approach: load a StableDiffusionPipeline once at
# module import time and pin it to the CPU.)
#model_id = "CompVis/stable-diffusion-v1-4"
#pipe = StableDiffusionPipeline.from_pretrained(model_id)
# Ensure the model is using the CPU
#pipe = pipe.to("cpu")
# Module-level holder for generated-image HTML; written by generate_images
# but never read elsewhere in this file — presumably leftover state.
image_html = ""
# Accelerate helper used below to select the execution device (CPU or GPU)
# for the diffusion pipeline.
accelerator = Accelerator()
# Function to display an example image
def display_example_image(url):
    """Download an image from *url* and render it in the Streamlit app.

    Args:
        url: HTTP(S) URL pointing at an image file.

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    # Bounded timeout so a hung server cannot freeze the Streamlit script.
    response = requests.get(url, timeout=30)
    # Fail loudly on HTTP errors instead of handing a non-image error body
    # (e.g. an HTML 404 page) to PIL, which would raise a confusing error.
    response.raise_for_status()
    img = Image.open(BytesIO(response.content))
    # NOTE(review): use_column_width is deprecated in recent Streamlit in
    # favor of use_container_width; kept for compatibility with the version
    # this app targets — confirm before upgrading.
    st.image(img, caption='Generated Image', use_column_width=True)
#function to generate AI based images using Huggingface Diffusers
def generate_images_using_huggingface_diffusers(text):
    """Generate images for *text* with a Stable Diffusion pipeline.

    Args:
        text: the user's text prompt.

    Returns:
        A list of PIL images (three per prompt) suitable for st.image().
    """
    # DiffusionPipeline is already imported at module level; the previous
    # redundant function-local re-import has been removed.
    #pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
    #pipe.load_lora_weights("strangerzonehf/Flux-Midjourney-Mix2-LoRA")
    # NOTE(review): the checkpoint is re-loaded on every call; consider
    # wrapping the load in @st.cache_resource so it happens once per session.
    pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    # Accelerator selects the best available device (falls back to CPU).
    pipe = pipe.to(accelerator.device)
    result = pipe(text, num_images_per_prompt=3)
    # Return the PIL images rather than the pipeline-output wrapper object:
    # the caller passes this straight to st.image(), which cannot render
    # the wrapper but does accept a list of images.
    return result.images
# Placeholder function for generating images (replace this with your actual generative AI code)
def generate_images(prompt, num_images=3):
    """Placeholder: return *num_images* copies of a sample floor-plan image.

    The previous body referenced undefined names (``pipe``, ``num_samples``)
    and then passed a list of PIL images to ``requests.get`` — it raised
    ``NameError`` on the first call. This version fetches one static sample
    image so the UI can be exercised end to end; swap in real generation
    code when available.

    Args:
        prompt: text prompt (ignored by the placeholder).
        num_images: number of identical copies to return.

    Returns:
        A list of ``num_images`` PIL images.
    """
    # Static sample floor plan, used purely for demonstration.
    image_url = ("https://wpmedia.roomsketcher.com/content/uploads/2022/01/"
                 "06145940/What-is-a-floor-plan-with-dimensions.png")
    response = requests.get(image_url, timeout=30)
    response.raise_for_status()
    img = Image.open(BytesIO(response.content))
    return [img] * num_images
# Page heading: first inject CSS that centers any element carrying the
# "title" class, then render the app's <h1> heading with that class.
_center_css = """
<style>
.title{
text-align: center
}
</style>
"""
_heading_html = """
<h1 class="title">AutoFloor</h1>
"""
st.markdown(_center_css, unsafe_allow_html=True)
st.markdown(_heading_html, unsafe_allow_html=True)
# Text input for the user's image-generation prompt.
user_prompt = st.text_input("Enter your prompt here:")
# (Commented-out experiments: a file uploader — "Unggah file Gambar" is
# Indonesian for "Upload image file" — and a Keras model load.)
# file= st.file_uploader ("Unggah file Gambar", type = ["jpg", "png"])
# model = tf.keras.models.load_model('L00005_HL512_bagus.h5')
# CSS hack: the span#button-after marker emitted below lets this :has()
# selector target the very next Streamlit button and center it on the page.
st.markdown("""
<style>.element-container:has(#button-after) + div button {
margin: 0 auto;
display: block;
}</style>""", unsafe_allow_html=True)
st.markdown('<span id="button-after"></span>', unsafe_allow_html=True)
# Generate and display images in a 3x3 grid
if st.button('Generate Images', type="primary"):
    if user_prompt:
        st.write(f"Prompt: {user_prompt}")
        #image_url = "https://wpmedia.roomsketcher.com/content/uploads/2022/01/06145940/What-is-a-floor-plan-with-dimensions.png" # Replace with a valid image URL
        # Generate images based on the user's prompt
        #generated_images = generate_images(user_prompt, num_images=3)
        image_output = generate_images_using_huggingface_diffusers(user_prompt)
        # NOTE(review): these status messages only appear AFTER generation
        # has already finished — move the "Generating" notice (or a
        # st.spinner) before the call above.
        st.info("Generating image.....")
        st.success("Image Generated Successfully")
        st.image(image_output, caption="Generated by Huggingface Diffusers")
        # Zoomable-image viewer built on the panzoom JS library.
        # NOTE(review): inside this f-string, `{{image_url}}` is an ESCAPED
        # brace pair and renders as the literal text "{image_url}", so the
        # <img> src is never substituted and the viewer shows a broken
        # image. A single-brace `{image_url}` with image_url defined (or a
        # base64 data URI of a generated image) is needed here — confirm
        # intended behavior before fixing.
        html_code = f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
.zoomable-image {{
transition: transform 0.3s ease;
cursor: pointer;
}}
</style>
<script src="https://unpkg.com/panzoom@9.4.3/dist/panzoom.min.js"></script>
</head>
<body>
<div id="image-container" style="text-align: center;">
<img id="zoomable-image" class="zoomable-image" src="{{image_url}}" alt="Zoomable Image" style="max-width: 100%; height: auto;">
</div>
<script>
document.addEventListener('DOMContentLoaded', (event) => {{
const image = document.getElementById('zoomable-image');
const panzoomInstance = panzoom(image, {{
maxZoom: 3,
minZoom: 1,
bounds: false,
boundsPadding: 0.1
}});
image.addEventListener('click', () => {{
const currentTransform = image.style.transform;
if (currentTransform.includes('matrix')) {{
panzoomInstance.zoomAbs(0, 0, 1);
}} else {{
panzoomInstance.zoomAbs(image.width / 2, image.height / 2, 3);
}}
}});
image.addEventListener('dblclick', () => {{
const xys = panzoomInstance.getTransform()
if(xys.scale > 1) {{
const fScale = 1 - xys.scale
const fixeX = xys.x / fScale
const fixeY = xys.y / fScale
panzoomInstance.smoothZoomAbs(fixeX, fixeY, 1)
}} else {{
panzoomInstance.moveBy(-xys.x, -xys.y, true)
panzoomInstance.smoothZoomAbs(xys.x, xys.y, 1)
}}
panzoomInstance.moveTo(0, 0)
panzoomInstance.zoomAbs(0, 0, 1)
}});
}});
</script>
</body>
</html>
"""
        # # Embed the HTML and JavaScript into the Streamlit app
        # st.components.v1.html(html_code, height=300)
        # Display images in a 3x3 grid
        # NOTE(review): `cols` is created but never used, and the loop
        # embeds the SAME html_code three times rather than laying three
        # different images out in the columns — likely leftover from the
        # commented-out grid code below.
        cols = st.columns(3)
        for i in range(3):
            # for j in range(3):
            # with cols[j]:
            # st.image(generated_images[i * 3 + j], use_column_width=True)
            # Display the image with zoom effect
            # container_style = """
            # <div class="container">
            # <img class="zoom" src="{}" style="width:100%;">
            # </div>
            # """.format(image_url)
            # st.markdown(container_style, unsafe_allow_html=True)
            st.components.v1.html(html_code, height=600)
    else:
        st.write("Please enter a prompt.")
|