Files changed (1) hide show
  1. app.py +14 -29
app.py CHANGED
@@ -31,7 +31,7 @@ pipeline.load_lora_weights(lora_weights_path)
31
  pipeline = pipeline.to(device)
32
 
33
  MAX_SEED = np.iinfo(np.int32).max
34
- MAX_IMAGE_SIZE = 1024 # Reduce max image size to fit within memory constraints
35
 
36
  @spaces.GPU
37
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
@@ -63,6 +63,17 @@ body {
63
  background-color: #ffffff; /* Myntra's white background */
64
  color: #282c3f; /* Myntra's primary text color */
65
  font-family: 'Arial', sans-serif;
 
 
 
 
 
 
 
 
 
 
 
66
  }
67
 
68
  #col-container {
@@ -104,28 +115,6 @@ body {
104
  border-radius: 8px;
105
  margin-top: 20px;
106
  }
107
-
108
- .color-block {
109
- height: 100px;
110
- width: 100%;
111
- margin-bottom: 10px;
112
- }
113
-
114
- .color-block-pink {
115
- background-color: #ff3f6c;
116
- }
117
-
118
- .color-block-blue {
119
- background-color: #3498db;
120
- }
121
-
122
- .color-block-green {
123
- background-color: #2ecc71;
124
- }
125
-
126
- .color-block-yellow {
127
- background-color: #f1c40f;
128
- }
129
  """
130
 
131
  if torch.cuda.is_available():
@@ -134,17 +123,13 @@ else:
134
  power_device = "CPU"
135
 
136
  with gr.Blocks(css=css) as demo:
 
 
137
  with gr.Column(elem_id="col-container"):
138
  gr.Markdown(f"""
139
- # Text-to-Image Generation
140
  Currently running on {power_device}.
141
  """)
142
 
143
- gr.HTML("<div class='color-block color-block-pink'></div>")
144
- gr.HTML("<div class='color-block color-block-blue'></div>")
145
- gr.HTML("<div class='color-block color-block-green'></div>")
146
- gr.HTML("<div class='color-block color-block-yellow'></div>")
147
-
148
  with gr.Row():
149
  prompt = gr.Textbox(
150
  label="Prompt",
 
31
  pipeline = pipeline.to(device)
32
 
33
  MAX_SEED = np.iinfo(np.int32).max
34
+ MAX_IMAGE_SIZE = 2048 # Raise max image dimension from 1024 to allow larger generations (requires sufficient GPU memory)
35
 
36
  @spaces.GPU
37
  def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
 
63
  background-color: #ffffff; /* Myntra's white background */
64
  color: #282c3f; /* Myntra's primary text color */
65
  font-family: 'Arial', sans-serif;
66
+ margin: 0;
67
+ padding: 0;
68
+ }
69
+
70
+ #header {
71
+ background-color: #ff3f6c; /* Myntra's pink color */
72
+ color: white;
73
+ text-align: center;
74
+ padding: 20px;
75
+ font-size: 24px;
76
+ font-weight: bold;
77
  }
78
 
79
  #col-container {
 
115
  border-radius: 8px;
116
  margin-top: 20px;
117
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118
  """
119
 
120
  if torch.cuda.is_available():
 
123
  power_device = "CPU"
124
 
125
  with gr.Blocks(css=css) as demo:
126
+ gr.HTML("<div id='header'>Myntra Text-to-Image Generation</div>")
127
+
128
  with gr.Column(elem_id="col-container"):
129
  gr.Markdown(f"""
 
130
  Currently running on {power_device}.
131
  """)
132
 
 
 
 
 
 
133
  with gr.Row():
134
  prompt = gr.Textbox(
135
  label="Prompt",