umang-immersfy committed on
Commit
ab8d95b
·
1 Parent(s): 0926cd3

comic grading

Browse files
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
__pycache__/aws_utils.cpython-310.pyc ADDED
Binary file (3.94 kB). View file
 
__pycache__/core.cpython-310.pyc ADDED
Binary file (5.84 kB). View file
 
__pycache__/openai_wrapper.cpython-310.pyc ADDED
Binary file (2.69 kB). View file
 
__pycache__/parameters.cpython-310.pyc ADDED
Binary file (526 Bytes). View file
 
__pycache__/script_gen.cpython-310.pyc ADDED
Binary file (2.21 kB). View file
 
app.py CHANGED
@@ -234,14 +234,23 @@ with gr.Blocks() as demo:
234
 
235
  regenerate_comps_btn.click(
236
  core.regenerate_composition_data,
237
- inputs=[],
 
 
 
 
 
 
 
 
 
 
238
  outputs=[]
239
  )
240
-
241
  regenerate_btn.click(
242
  core.regenerate_data,
243
  inputs=[],
244
  outputs=[]
245
  )
246
 
247
- demo.launch(auth=("admin", "Qrt@12*34#immersfy"), share=True, ssr_mode=False)
 
234
 
235
  regenerate_comps_btn.click(
236
  core.regenerate_composition_data,
237
+ inputs=[image_description,
238
+ narration,
239
+ character,
240
+ dialouge,
241
+ location,
242
+ setting,
243
+ current_episode,
244
+ current_scene,
245
+ current_frame,
246
+ episodes_data
247
+ ],
248
  outputs=[]
249
  )
 
250
  regenerate_btn.click(
251
  core.regenerate_data,
252
  inputs=[],
253
  outputs=[]
254
  )
255
 
256
+ demo.launch(auth=("admin", "Qrt@12*34#immersfy"), share=True, ssr_mode=False, debug=True)
core.py CHANGED
@@ -1,17 +1,20 @@
1
  """House of all specific functions used to control data flow."""
 
2
  from typing import List
3
  from PIL import Image
4
  import gradio as gr
5
  import dataclasses
6
  import io
7
  import jinja2
8
-
9
  import aws_utils
10
  import parameters
11
  import script_gen
12
  import io as iowrapper
 
13
 
14
  AWS_BUCKET = parameters.AWS_BUCKET
 
15
 
16
 
17
  @dataclasses.dataclass
@@ -28,11 +31,12 @@ class ComicFrame:
28
  narration: str
29
  character_dilouge: str
30
  character: str
31
- compositions: List[Composition] = dataclasses.field(default_factory=list)
32
- location: str
33
  setting: str
34
  all_characters: list
35
-
 
 
36
 
37
 
38
  def list_current_dir(bucket_name: str, folder_path: str = "") -> list:
@@ -124,7 +128,9 @@ def load_metadata_fn(comic_id: str):
124
  )
125
 
126
 
127
- def load_data_next(episodes_data: list, current_episode: int, current_frame: int):
 
 
128
  if current_frame + 1 < len(episodes_data[current_episode]):
129
  current_frame += 1
130
  elif current_episode + 1 < len(episodes_data):
@@ -135,11 +141,13 @@ def load_data_next(episodes_data: list, current_episode: int, current_frame: int
135
  return (
136
  gr.update(value=current_episode),
137
  gr.update(value=current_frame),
138
- *load_data_inner(episodes_data, current_episode, current_frame),
139
  )
140
 
141
 
142
- def load_data_prev(episodes_data: list, current_episode: int, current_frame: int):
 
 
143
  if current_frame - 1 >= 0:
144
  current_frame -= 1
145
  elif current_episode - 1 > min(list(episodes_data.keys())):
@@ -150,7 +158,7 @@ def load_data_prev(episodes_data: list, current_episode: int, current_frame: int
150
  return (
151
  gr.update(value=current_episode),
152
  gr.update(value=current_frame),
153
- *load_data_inner(episodes_data, current_episode, current_frame),
154
  )
155
 
156
 
@@ -160,7 +168,7 @@ def load_from_dropdown(
160
  return (
161
  gr.update(value=selected_episode),
162
  gr.update(value=selected_frame),
163
- *load_data_inner(episodes_data, selected_episode, selected_frame),
164
  )
165
 
166
 
@@ -218,52 +226,58 @@ def regenerate_composition_data(
218
  current_frame: int,
219
  episodes_data: dict,
220
  ):
221
- print(
222
- f"Generating compositions for episode: {current_episode} and scene: {current_scene} and frame: {}."
223
- )
224
- prompt_dict = {
225
- "system": script_gen.generate_image_compositions_instruction,
226
- "user": jinja2.Template(
227
- source=script_gen.generate_image_compositions_user_prompt
228
- ).render(
229
- {
230
- "FRAME": dataclasses.asdict(frame),
231
- }
232
- ),
233
- }
 
 
 
 
 
 
 
 
 
 
234
 
235
- compositions = llm.generate_valid_json_response(prompt_dict)
236
- print(compositions)
237
- frame.compositions = [
238
- Composition(**composition)
239
- for composition in compositions["compositions"]
240
- ]
241
-
242
 
243
  def regenerate_data(
244
- frame_data: ComicFrame,
245
  ):
246
- # for
247
- payload = {
248
- "prompt": composition.prompt,
249
- "characters": related_chars,
250
- "parameters": {
251
- "height": parameters.IMG_HEIGHT,
252
- "width": parameters.IMG_WIDTH,
253
- "visual_style": visual_style,
254
- "seed": seed_val,
255
- },
256
- }
257
-
258
- data = iowrapper.get_valid_post_response(
259
- url=parameters.MODEL_SERVER_URL + "/generate_image",
260
- payload=payload,
261
- )
262
- image_data = io.BytesIO(base64.b64decode(data["image"]))
263
- path = aws_utils.save_to_s3(
264
- parameters.AWS_BUCKET,
265
- f"{self.id}/episodes/episode-{episode_num}/compositions/scene-{scene_num}/frame-{frame_num}",
266
- image_data,
267
- f"{num}.jpg",
268
- )
269
- pass
 
 
 
 
1
  """House of all specific functions used to control data flow."""
2
+
3
  from typing import List
4
  from PIL import Image
5
  import gradio as gr
6
  import dataclasses
7
  import io
8
  import jinja2
9
+ import base64
10
  import aws_utils
11
  import parameters
12
  import script_gen
13
  import io as iowrapper
14
+ import openai_wrapper
15
 
16
  AWS_BUCKET = parameters.AWS_BUCKET
17
+ llm = openai_wrapper.GPT_4O_MINI
18
 
19
 
20
  @dataclasses.dataclass
 
31
  narration: str
32
  character_dilouge: str
33
  character: str
34
+ location: str # Moved up here
 
35
  setting: str
36
  all_characters: list
37
+ compositions: List[Composition] = dataclasses.field(
38
+ default_factory=list
39
+ ) # Keep this as the last argument
40
 
41
 
42
  def list_current_dir(bucket_name: str, folder_path: str = "") -> list:
 
128
  )
129
 
130
 
131
+ def load_data_next(
132
+ episodes_data: list, current_episode: int, current_frame: int, is_developer=False
133
+ ):
134
  if current_frame + 1 < len(episodes_data[current_episode]):
135
  current_frame += 1
136
  elif current_episode + 1 < len(episodes_data):
 
141
  return (
142
  gr.update(value=current_episode),
143
  gr.update(value=current_frame),
144
+ *load_data_inner(episodes_data, current_episode, current_frame, is_developer),
145
  )
146
 
147
 
148
+ def load_data_prev(
149
+ episodes_data: list, current_episode: int, current_frame: int, is_developer=False
150
+ ):
151
  if current_frame - 1 >= 0:
152
  current_frame -= 1
153
  elif current_episode - 1 > min(list(episodes_data.keys())):
 
158
  return (
159
  gr.update(value=current_episode),
160
  gr.update(value=current_frame),
161
+ *load_data_inner(episodes_data, current_episode, current_frame, is_developer),
162
  )
163
 
164
 
 
168
  return (
169
  gr.update(value=selected_episode),
170
  gr.update(value=selected_frame),
171
+ *load_data_inner(episodes_data, selected_episode, selected_frame, is_developer),
172
  )
173
 
174
 
 
226
  current_frame: int,
227
  episodes_data: dict,
228
  ):
229
+ pass
230
+ # print(
231
+ # f"Generating compositions for episode: {current_episode} and scene: {current_scene} and frame: {current_frame}."
232
+ # )
233
+ # frame = episodes_data[current_episode][current_frame]
234
+ # print(frame)
235
+ # prompt_dict = {
236
+ # "system": script_gen.generate_image_compositions_instruction,
237
+ # "user": jinja2.Template(
238
+ # source=script_gen.generate_image_compositions_user_prompt
239
+ # ).render(
240
+ # {
241
+ # "FRAME": frame,
242
+ # }
243
+ # ),
244
+ # }
245
+
246
+ # compositions = llm.generate_valid_json_response(prompt_dict)
247
+ # print(compositions)
248
+ # frame.compositions = [
249
+ # Composition(**composition)
250
+ # for composition in compositions["compositions"]
251
+ # ]
252
 
 
 
 
 
 
 
 
253
 
254
  def regenerate_data(
255
+ frame_data: ComicFrame,
256
  ):
257
+ pass
258
+
259
+
260
+ # # for
261
+ # payload = {
262
+ # "prompt": composition.prompt,
263
+ # "characters": related_chars,
264
+ # "parameters": {
265
+ # "height": parameters.IMG_HEIGHT,
266
+ # "width": parameters.IMG_WIDTH,
267
+ # "visual_style": visual_style,
268
+ # "seed": seed_val,
269
+ # },
270
+ # }
271
+
272
+ # data = iowrapper.get_valid_post_response(
273
+ # url=parameters.MODEL_SERVER_URL + "/generate_image",
274
+ # payload=payload,
275
+ # )
276
+ # image_data = io.BytesIO(base64.b64decode(data["image"]))
277
+ # path = aws_utils.save_to_s3(
278
+ # parameters.AWS_BUCKET,
279
+ # f"{self.id}/episodes/episode-{episode_num}/compositions/scene-{scene_num}/frame-{frame_num}",
280
+ # image_data,
281
+ # f"{num}.jpg",
282
+ # )
283
+ # pass
parameters.py CHANGED
@@ -1,4 +1,8 @@
1
  import os
 
 
 
 
2
 
3
  AWS_BUCKET = os.getenv("AWS_BUCKET")
4
  os.environ["AWS_ACCESS_KEY_ID"] = os.getenv("AWS_ACCESS_KEY_ID")
@@ -6,4 +10,4 @@ os.environ["AWS_SECRET_ACCESS_KEY"] = os.getenv("AWS_SECRET_ACCESS_KEY")
6
  os.environ["S3_BUCKET_NAME"] = os.getenv("AWS_BUCKET")
7
  VISUAL_CHOICES = ["DARK", "FLUX", "GHIBLI_COMIC"]
8
  MAX_TRIES = os.getenv("MAX_TRIES")
9
- OPEN_AI_API_KEY = os.getenv("OPEN_AI_API_KEY")
 
1
  import os
2
+ from dotenv import load_dotenv
3
+
4
+ load_dotenv()
5
+
6
 
7
  AWS_BUCKET = os.getenv("AWS_BUCKET")
8
  os.environ["AWS_ACCESS_KEY_ID"] = os.getenv("AWS_ACCESS_KEY_ID")
 
10
  os.environ["S3_BUCKET_NAME"] = os.getenv("AWS_BUCKET")
11
  VISUAL_CHOICES = ["DARK", "FLUX", "GHIBLI_COMIC"]
12
  MAX_TRIES = os.getenv("MAX_TRIES")
13
+ OPEN_AI_API_KEY = os.getenv("OPEN_AI_KEY")